uwb: use dev_dbg() for debug messages
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / uwb / i1480 / i1480u-wlp / rx.c
CommitLineData
a21b963a
IPG
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * i1480u's RX handling is simple. i1480u will send the received
24 * network packets broken up in fragments; 1 to N fragments make a
25 * packet, we assemble them together and deliver the packet with netif_rx().
26 *
 27 * Because each USB transfer is a *single* fragment (except when the
28 * transfer contains a first fragment), each URB called thus
29 * back contains one or two fragments. So we queue N URBs, each with its own
30 * fragment buffer. When a URB is done, we process it (adding to the
31 * current skb from the fragment buffer until complete). Once
32 * processed, we requeue the URB. There is always a bunch of URBs
 33 * ready to take data, so the gap between deliveries should be minimal.
34 *
35 * An URB's transfer buffer is the data field of a socket buffer. This
36 * reduces copying as data can be passed directly to network layer. If a
37 * complete packet or 1st fragment is received the URB's transfer buffer is
38 * taken away from it and used to send data to the network layer. In this
39 * case a new transfer buffer is allocated to the URB before being requeued.
40 * If a "NEXT" or "LAST" fragment is received, the fragment contents is
41 * appended to the RX packet under construction and the transfer buffer
42 * is reused. To be able to use this buffer to assemble complete packets
43 * we set each buffer's size to that of the MAX ethernet packet that can
44 * be received. There is thus room for improvement in memory usage.
45 *
46 * When the max tx fragment size increases, we should be able to read
47 * data into the skbs directly with very simple code.
48 *
49 * ROADMAP:
50 *
51 * ENTRY POINTS:
52 *
53 * i1480u_rx_setup(): setup RX context [from i1480u_open()]
54 *
55 * i1480u_rx_release(): release RX context [from i1480u_stop()]
56 *
57 * i1480u_rx_cb(): called when the RX USB URB receives a
58 * packet. It removes the header and pushes it up
59 * the Linux netdev stack with netif_rx().
60 *
61 * i1480u_rx_buffer()
62 * i1480u_drop() and i1480u_fix()
63 * i1480u_skb_deliver
64 *
65 */
66
67#include <linux/netdevice.h>
68#include <linux/etherdevice.h>
69#include "i1480u-wlp.h"
70
/*
 * Setup the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.
 *
 * Allocates i1480u_RX_BUFS URB+skb pairs and submits every URB to the
 * bulk-IN endpoint (altsetting endpoint[1]).  Called from
 * i1480u_open() (see file header ROADMAP).
 *
 * Returns: 0 on success; negative errno on failure.  On failure,
 * everything allocated so far is torn down via i1480u_rx_release().
 */
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
		/* 2-byte headroom; transfer length below is reduced by 2
		 * accordingly so the transfer fits the skb's data area */
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		/* The skb's data area is used directly as the URB transfer
		 * buffer, so received data needs no extra copy before being
		 * handed to the network stack */
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			  usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			  rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			  i1480u_rx_cb, rx_buf);
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}
126
127
bce83697 128/* Release resources associated to the rx context */
a21b963a
IPG
129void i1480u_rx_release(struct i1480u *i1480u)
130{
131 int cnt;
132 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
133 if (i1480u->rx_buf[cnt].data)
134 dev_kfree_skb(i1480u->rx_buf[cnt].data);
135 if (i1480u->rx_buf[cnt].urb) {
136 usb_kill_urb(i1480u->rx_buf[cnt].urb);
137 usb_free_urb(i1480u->rx_buf[cnt].urb);
138 }
139 }
140 if (i1480u->rx_skb != NULL)
141 dev_kfree_skb(i1480u->rx_skb);
142}
143
144static
145void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
146{
147 int cnt;
148 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
149 if (i1480u->rx_buf[cnt].urb)
150 usb_unlink_urb(i1480u->rx_buf[cnt].urb);
151 }
152}
153
/*
 * Fix an out-of-sequence packet: complain (rate-limited), discard the
 * partially reassembled skb and reset the reassembly state so the next
 * 1ST/CMP fragment can start a fresh packet.
 *
 * Uses dev_kfree_skb_irq() since it may run from the URB completion
 * path.  'msg' is a printf-style format string plus arguments.
 */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)
163
164
/*
 * Drop an out-of-sequence packet: complain (rate-limited) and bump the
 * rx_dropped statistic.  Unlike i1480u_fix(), the current reassembly
 * state is left untouched.  'msg' is a printf-style format string plus
 * arguments.
 */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->stats.rx_dropped++;			\
} while (0)
172
173
174
175
/*
 * Finalizes setting up the SKB and delivers it
 *
 * We first pass the incoming frame to the WLP substack for verification.
 * It may also be a WLP association frame, in which case WLP will take
 * over the processing.  If WLP does not take it over it will still
 * verify it; if the frame is invalid the skb will be freed by WLP and
 * we will not continue parsing.
 *
 * In every exit path, ownership of rx_skb has passed on (to WLP or
 * netif_rx()), so the reassembly state is reset unconditionally.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;	/* WLP consumed (or freed) the skb */
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	i1480u->stats.rx_packets++;
	i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
	net_dev->last_rx = jiffies;
	/* FIXME: flow control: check netif_rx() retval */

	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}
206
207
bce83697 208/*
a21b963a
IPG
209 * Process a buffer of data received from the USB RX endpoint
210 *
211 * First fragment arrives with next or last fragment. All other fragments
212 * arrive alone.
213 *
214 * /me hates long functions.
215 */
216static
217void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
218{
219 unsigned pkt_completed = 0; /* !0 when we got all pkt fragments */
220 size_t untd_hdr_size, untd_frg_size;
221 size_t i1480u_hdr_size;
222 struct wlp_rx_hdr *i1480u_hdr = NULL;
223
224 struct i1480u *i1480u = rx_buf->i1480u;
225 struct sk_buff *skb = rx_buf->data;
226 int size_left = rx_buf->urb->actual_length;
227 void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
228 struct untd_hdr *untd_hdr;
229
230 struct net_device *net_dev = i1480u->net_dev;
231 struct device *dev = &i1480u->usb_iface->dev;
232 struct sk_buff *new_skb;
233
234#if 0
235 dev_fnstart(dev,
236 "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
237 dev_err(dev, "RX packet, %zu bytes\n", size_left);
238 dump_bytes(dev, ptr, size_left);
239#endif
240 i1480u_hdr_size = sizeof(struct wlp_rx_hdr);
241
242 while (size_left > 0) {
243 if (pkt_completed) {
244 i1480u_drop(i1480u, "RX: fragment follows completed"
245 "packet in same buffer. Dropping\n");
246 break;
247 }
248 untd_hdr = ptr;
249 if (size_left < sizeof(*untd_hdr)) { /* Check the UNTD header */
250 i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
251 goto out;
252 }
253 if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) { /* Paranoia: TX set? */
254 i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
255 goto out;
256 }
257 switch (untd_hdr_type(untd_hdr)) { /* Check the UNTD header type */
258 case i1480u_PKT_FRAG_1ST: {
259 struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
260 dev_dbg(dev, "1st fragment\n");
261 untd_hdr_size = sizeof(struct untd_hdr_1st);
262 if (i1480u->rx_skb != NULL)
263 i1480u_fix(i1480u, "RX: 1st fragment out of "
264 "sequence! Fixing\n");
265 if (size_left < untd_hdr_size + i1480u_hdr_size) {
266 i1480u_drop(i1480u, "RX: short 1st fragment! "
267 "Dropping\n");
268 goto out;
269 }
270 i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
271 - i1480u_hdr_size;
272 untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
273 if (size_left < untd_hdr_size + untd_frg_size) {
274 i1480u_drop(i1480u,
275 "RX: short payload! Dropping\n");
276 goto out;
277 }
278 i1480u->rx_skb = skb;
279 i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
280 i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
281 skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
282 skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
283 stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
284 stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
285 rx_buf->data = NULL; /* need to create new buffer */
286 break;
287 }
288 case i1480u_PKT_FRAG_NXT: {
289 dev_dbg(dev, "nxt fragment\n");
290 untd_hdr_size = sizeof(struct untd_hdr_rst);
291 if (i1480u->rx_skb == NULL) {
292 i1480u_drop(i1480u, "RX: next fragment out of "
293 "sequence! Dropping\n");
294 goto out;
295 }
296 if (size_left < untd_hdr_size) {
297 i1480u_drop(i1480u, "RX: short NXT fragment! "
298 "Dropping\n");
299 goto out;
300 }
301 untd_frg_size = le16_to_cpu(untd_hdr->len);
302 if (size_left < untd_hdr_size + untd_frg_size) {
303 i1480u_drop(i1480u,
304 "RX: short payload! Dropping\n");
305 goto out;
306 }
307 memmove(skb_put(i1480u->rx_skb, untd_frg_size),
308 ptr + untd_hdr_size, untd_frg_size);
309 break;
310 }
311 case i1480u_PKT_FRAG_LST: {
312 dev_dbg(dev, "Lst fragment\n");
313 untd_hdr_size = sizeof(struct untd_hdr_rst);
314 if (i1480u->rx_skb == NULL) {
315 i1480u_drop(i1480u, "RX: last fragment out of "
316 "sequence! Dropping\n");
317 goto out;
318 }
319 if (size_left < untd_hdr_size) {
320 i1480u_drop(i1480u, "RX: short LST fragment! "
321 "Dropping\n");
322 goto out;
323 }
324 untd_frg_size = le16_to_cpu(untd_hdr->len);
325 if (size_left < untd_frg_size + untd_hdr_size) {
326 i1480u_drop(i1480u,
327 "RX: short payload! Dropping\n");
328 goto out;
329 }
330 memmove(skb_put(i1480u->rx_skb, untd_frg_size),
331 ptr + untd_hdr_size, untd_frg_size);
332 pkt_completed = 1;
333 break;
334 }
335 case i1480u_PKT_FRAG_CMP: {
336 dev_dbg(dev, "cmp fragment\n");
337 untd_hdr_size = sizeof(struct untd_hdr_cmp);
338 if (i1480u->rx_skb != NULL)
339 i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
340 " fragment!\n");
341 if (size_left < untd_hdr_size + i1480u_hdr_size) {
342 i1480u_drop(i1480u, "RX: short CMP fragment! "
343 "Dropping\n");
344 goto out;
345 }
346 i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
347 untd_frg_size = i1480u->rx_untd_pkt_size;
348 if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
349 i1480u_drop(i1480u,
350 "RX: short payload! Dropping\n");
351 goto out;
352 }
353 i1480u->rx_skb = skb;
354 i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
355 i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
356 stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
357 stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
358 skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
359 skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
360 rx_buf->data = NULL; /* for hand off skb to network stack */
361 pkt_completed = 1;
362 i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
363 break;
364 }
365 default:
366 i1480u_drop(i1480u, "RX: unknown packet type %u! "
367 "Dropping\n", untd_hdr_type(untd_hdr));
368 goto out;
369 }
370 size_left -= untd_hdr_size + untd_frg_size;
371 if (size_left > 0)
372 ptr += untd_hdr_size + untd_frg_size;
373 }
374 if (pkt_completed)
375 i1480u_skb_deliver(i1480u);
376out:
377 /* recreate needed RX buffers*/
378 if (rx_buf->data == NULL) {
379 /* buffer is being used to receive packet, create new */
380 new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
381 if (!new_skb) {
382 if (printk_ratelimit())
383 dev_err(dev,
384 "RX: cannot allocate RX buffer\n");
385 } else {
386 new_skb->dev = net_dev;
387 new_skb->ip_summed = CHECKSUM_NONE;
388 skb_reserve(new_skb, 2);
389 rx_buf->data = new_skb;
390 }
391 }
392 return;
393}
394
395
bce83697 396/*
a21b963a
IPG
397 * Called when an RX URB has finished receiving or has found some kind
398 * of error condition.
399 *
400 * LIMITATIONS:
401 *
402 * - We read USB-transfers, each transfer contains a SINGLE fragment
403 * (can contain a complete packet, or a 1st, next, or last fragment
404 * of a packet).
405 * Looks like a transfer can contain more than one fragment (07/18/06)
406 *
407 * - Each transfer buffer is the size of the maximum packet size (minus
408 * headroom), i1480u_MAX_PKT_SIZE - 2
409 *
410 * - We always read the full USB-transfer, no partials.
411 *
412 * - Each transfer is read directly into a skb. This skb will be used to
413 * send data to the upper layers if it is the first fragment or a complete
414 * packet. In the other cases the data will be copied from the skb to
415 * another skb that is being prepared for the upper layers from a prev
416 * first fragment.
417 *
418 * It is simply too much of a pain. Gosh, there should be a unified
419 * SG infrastructure for *everything* [so that I could declare a SG
420 * buffer, pass it to USB for receiving, append some space to it if
421 * I wish, receive more until I have the whole chunk, adapt
422 * pointers on each fragment to remove hardware headers and then
423 * attach that to an skbuff and netif_rx()].
424 */
425void i1480u_rx_cb(struct urb *urb)
426{
427 int result;
428 int do_parse_buffer = 1;
429 struct i1480u_rx_buf *rx_buf = urb->context;
430 struct i1480u *i1480u = rx_buf->i1480u;
431 struct device *dev = &i1480u->usb_iface->dev;
432 unsigned long flags;
433 u8 rx_buf_idx = rx_buf - i1480u->rx_buf;
434
435 switch (urb->status) {
436 case 0:
437 break;
438 case -ECONNRESET: /* Not an error, but a controlled situation; */
439 case -ENOENT: /* (we killed the URB)...so, no broadcast */
440 case -ESHUTDOWN: /* going away! */
441 dev_err(dev, "RX URB[%u]: goind down %d\n",
442 rx_buf_idx, urb->status);
443 goto error;
444 default:
445 dev_err(dev, "RX URB[%u]: unknown status %d\n",
446 rx_buf_idx, urb->status);
447 if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
448 EDC_ERROR_TIMEFRAME)) {
449 dev_err(dev, "RX: max acceptable errors exceeded,"
450 " resetting device.\n");
451 i1480u_rx_unlink_urbs(i1480u);
452 wlp_reset_all(&i1480u->wlp);
453 goto error;
454 }
455 do_parse_buffer = 0;
456 break;
457 }
458 spin_lock_irqsave(&i1480u->lock, flags);
459 /* chew the data fragments, extract network packets */
460 if (do_parse_buffer) {
461 i1480u_rx_buffer(rx_buf);
462 if (rx_buf->data) {
463 rx_buf->urb->transfer_buffer = rx_buf->data->data;
464 result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
465 if (result < 0) {
466 dev_err(dev, "RX URB[%u]: cannot submit %d\n",
467 rx_buf_idx, result);
468 }
469 }
470 }
471 spin_unlock_irqrestore(&i1480u->lock, flags);
472error:
473 return;
474}
475