[AX25/NETROM]: Clean up direct calls into the IP stack
File: net/ax25/ax25_in.c (from the mt8127/android_kernel_alcatel_ttab.git kernel tree on GitHub)
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
8 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
9 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
10 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
11 */
12 #include <linux/errno.h>
13 #include <linux/types.h>
14 #include <linux/socket.h>
15 #include <linux/in.h>
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/timer.h>
19 #include <linux/string.h>
20 #include <linux/sockios.h>
21 #include <linux/net.h>
22 #include <net/ax25.h>
23 #include <linux/inet.h>
24 #include <linux/netdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/netfilter.h>
27 #include <net/sock.h>
28 #include <net/tcp_states.h>
29 #include <asm/uaccess.h>
30 #include <asm/system.h>
31 #include <linux/fcntl.h>
32 #include <linux/mm.h>
33 #include <linux/interrupt.h>
34
/*
 * Given a fragment, queue it on the fragment queue and if the fragment
 * is complete, send it back to ax25_rx_iframe.
 *
 * Returns 1 when this routine has taken ownership of the skb (it was
 * queued for reassembly, or the whole queue was purged on an allocation
 * failure); returns 0 when the fragment is out of sequence, in which
 * case the caller still owns the skb and must free it.
 *
 * Reassembly state lives in the control block: ax25->fragno holds the
 * "fragments remaining" count from the last accepted segment and
 * ax25->fraglen the accumulated payload length.
 */
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
	struct sk_buff *skbn, *skbo;

	if (ax25->fragno != 0) {
		/* Mid-reassembly: only accept a continuation segment whose
		 * remaining-count is exactly one less than the previous one. */
		if (!(*skb->data & AX25_SEG_FIRST)) {
			if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
				/* Enqueue fragment */
				ax25->fragno = *skb->data & AX25_SEG_REM;
				skb_pull(skb, 1);	/* skip fragno */
				ax25->fraglen += skb->len;
				skb_queue_tail(&ax25->frag_queue, skb);

				/* Last fragment received ? */
				if (ax25->fragno == 0) {
					skbn = alloc_skb(AX25_MAX_HEADER_LEN +
							 ax25->fraglen,
							 GFP_ATOMIC);
					if (!skbn) {
						/* No memory for the reassembled
						 * frame: drop all fragments. */
						skb_queue_purge(&ax25->frag_queue);
						return 1;
					}

					skb_reserve(skbn, AX25_MAX_HEADER_LEN);

					skbn->dev = ax25->ax25_dev->dev;
					skbn->h.raw = skbn->data;
					skbn->nh.raw = skbn->data;

					/* Copy data from the fragments */
					while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
						memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
						kfree_skb(skbo);
					}

					ax25->fraglen = 0;

					/* Hand the reassembled frame back; a 0
					 * return means it was not consumed. */
					if (ax25_rx_iframe(ax25, skbn) == 0)
						kfree_skb(skbn);
				}

				return 1;
			}
		}
	} else {
		/* First fragment received */
		if (*skb->data & AX25_SEG_FIRST) {
			/* Discard any stale fragments from an earlier,
			 * never-completed sequence before starting anew. */
			skb_queue_purge(&ax25->frag_queue);
			ax25->fragno = *skb->data & AX25_SEG_REM;
			skb_pull(skb, 1);	/* skip fragno */
			ax25->fraglen = skb->len;
			skb_queue_tail(&ax25->frag_queue, skb);
			return 1;
		}
	}

	return 0;
}
97
98 /*
99 * This is where all valid I frames are sent to, to be dispatched to
100 * whichever protocol requires them.
101 */
102 int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
103 {
104 int (*func)(struct sk_buff *, ax25_cb *);
105 volatile int queued = 0;
106 unsigned char pid;
107
108 if (skb == NULL) return 0;
109
110 ax25_start_idletimer(ax25);
111
112 pid = *skb->data;
113
114 if (pid == AX25_P_IP) {
115 /* working around a TCP bug to keep additional listeners
116 * happy. TCP re-uses the buffer and destroys the original
117 * content.
118 */
119 struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
120 if (skbn != NULL) {
121 kfree_skb(skb);
122 skb = skbn;
123 }
124
125 skb_pull(skb, 1); /* Remove PID */
126 skb->h.raw = skb->data;
127 skb->nh.raw = skb->data;
128 skb->dev = ax25->ax25_dev->dev;
129 skb->pkt_type = PACKET_HOST;
130 skb->protocol = htons(ETH_P_IP);
131 netif_rx(skb);
132 return 1;
133 }
134 if (pid == AX25_P_SEGMENT) {
135 skb_pull(skb, 1); /* Remove PID */
136 return ax25_rx_fragment(ax25, skb);
137 }
138
139 if ((func = ax25_protocol_function(pid)) != NULL) {
140 skb_pull(skb, 1); /* Remove PID */
141 return (*func)(skb, ax25);
142 }
143
144 if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
145 if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
146 ax25->pidincl) {
147 if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
148 queued = 1;
149 else
150 ax25->condition |= AX25_COND_OWN_RX_BUSY;
151 }
152 }
153
154 return queued;
155 }
156
157 /*
158 * Higher level upcall for a LAPB frame
159 */
160 static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
161 {
162 int queued = 0;
163
164 if (ax25->state == AX25_STATE_0)
165 return 0;
166
167 switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
168 case AX25_PROTO_STD_SIMPLEX:
169 case AX25_PROTO_STD_DUPLEX:
170 queued = ax25_std_frame_in(ax25, skb, type);
171 break;
172
173 #ifdef CONFIG_AX25_DAMA_SLAVE
174 case AX25_PROTO_DAMA_SLAVE:
175 if (dama || ax25->ax25_dev->dama.slave)
176 queued = ax25_ds_frame_in(ax25, skb, type);
177 else
178 queued = ax25_std_frame_in(ax25, skb, type);
179 break;
180 #endif
181 }
182
183 return queued;
184 }
185
/*
 * ax25_rcv() - main receive path for one AX.25 frame from a device.
 *
 * Parses the address field, delivers UI frames directly (IP, ARP, text
 * datagram sockets, raw sockets), runs connected-mode frames through the
 * LAPB state machine of an existing control block, and accepts incoming
 * SABM(E) connection requests by creating a new control block.
 *
 * Always returns 0; the skb is either consumed here or freed here.
 */
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 * Process the AX.25/LAPB frame.
	 */

	skb->h.raw = skb->data;

	/* Frames from devices without AX.25 private data are dropped. */
	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 * Pull of the AX.25 headers leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb->h.raw = skb->data + 2;		/* skip control and pid */

		/* Raw sockets get a look at every UI frame for this dest. */
		ax25_send_to_raw(&dest, skb, skb->data[1]);

		/* UI frames that are neither for us nor broadcast stop here. */
		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) {
			kfree_skb(skb);
			return 0;
		}

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb->h.raw = skb->data;
			skb->nh.raw = skb->data;
			skb->dev = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb->h.raw = skb->data;
			skb->nh.raw = skb->data;
			skb->dev = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				/* Drop if the socket receive buffer is full. */
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 * Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 * Is connected mode supported on this device ?
	 * If not, should we DM the incoming frame (except DMs) or
	 * silently ignore them. For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0) {
		kfree_skb(skb);
		return 0;
	}

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	/* An existing connection for this address pair and (reversed)
	 * digipeater path takes the frame through its state machine. */
	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 * Process the frame. If it is queued up internally it
		 * returns one otherwise we free it immediately. This
		 * routine itself wakes the user context layers so we do
		 * no further work
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 * Never reply to a DM. Also ignore any connects for
		 * addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		kfree_skb(skb);
		return 0;
	}

	/* b) received SABM(E) */

	/* Find a listener on the destination, or on the next digipeater
	 * hop if the frame still has digipeating left to do. */
	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		/* Refuse the connection when the accept queue is full or a
		 * child socket cannot be created; DM only if it was for us. */
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		/* The SABM skb is parked on the listener's receive queue so
		 * accept() can pick up the new connection. */
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine) {
			kfree_skb(skb);
			return 0;
		}

		/* No listening socket, but the frame is for us: build a
		 * socketless control block anyway (NOTE(review): presumably
		 * for in-kernel L3 users such as NET/ROM - confirm). */
		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			return 0;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	/* Our address pair is the reverse of the sender's. */
	ax25->source_addr = dest;
	ax25->dest_addr = src;

	/*
	 * Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		if (ax25->digipeat != NULL) {
			kfree(ax25->digipeat);
			ax25->digipeat = NULL;
		}
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	/* SABME selects extended (modulo-128) operation; SABM standard. */
	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	/* Acknowledge the connection request. */
	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		/* Wake the listener; the skb now sits on its queue. */
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else
		kfree_skb(skb);

	return 0;
}
445
446 /*
447 * Receive an AX.25 frame via a SLIP interface.
448 */
449 int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
450 struct packet_type *ptype, struct net_device *orig_dev)
451 {
452 skb->sk = NULL; /* Initially we don't know who it's for */
453 skb->destructor = NULL; /* Who initializes this, dammit?! */
454
455 if ((*skb->data & 0x0F) != 0) {
456 kfree_skb(skb); /* Not a KISS data frame */
457 return 0;
458 }
459
460 skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */
461
462 return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
463 }