/*
 * WUSB Wire Adapter: WLP interface
 * Deal with TX (massaging data to transmit, handling it)
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * Transmission engine. Get an skb, create from that a WLP transmit
 * context, add a WLP TX header (which we keep prefilled in the
 * device's instance), fill out the target-specific fields and
 * fire it.
 *
 * ROADMAP:
 *
 * Entry points:
 *
 *   i1480u_tx_release(): called by i1480u_disconnect() to release
 *                        pending tx contexts.
 *
 *   i1480u_tx_cb(): callback for TX contexts (USB URBs)
 *     i1480u_tx_destroy(): takes a TX context off the pending list
 *                          and frees its resources.
 *
 *   i1480u_tx_timeout(): called for timeout handling from the
 *                        network stack.
 *
 *   i1480u_hard_start_xmit(): called for transmitting an skb from
 *                             the network stack. Will interact with the
 *                             WLP substack to verify and prepare the frame.
 *     i1480u_xmit_frame(): actual transmission on hardware
 *
 *       i1480u_tx_create(): creates a TX context
 *         i1480u_tx_create_1(): for packets that fit in 1 fragment
 *         i1480u_tx_create_n(): for packets needing >1 fragments
 *
 * TODO:
 *
 * - FIXME: rewrite using usb_sg_*(), add async support to
 *   usb_sg_*(). It might not make much sense, as most of the
 *   time the MTU will be smaller than one page...
 */

#include <linux/slab.h>
#include "i1480u-wlp.h"

enum {
	/* This is only for Next and Last TX packets */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};

/* Free resources allocated to an i1480u tx context. */
static
void i1480u_tx_free(struct i1480u_tx *wtx)
{
	kfree(wtx->buf);
	if (wtx->skb)
		dev_kfree_skb_irq(wtx->skb);
	usb_free_urb(wtx->urb);
	kfree(wtx);
}

static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}

static
void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}


/*
 * Callback for a completed tx USB URB.
 *
 * TODO:
 *
 * - FIXME: recover errors more gracefully
 * - FIXME: handle NAKs (I don't think they come here) for flow ctl
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&i1480u->lock, flags);
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "notif endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded. "
				"Resetting device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	if (atomic_dec_return(&i1480u->tx_inflight.count)
		    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
	return;
}
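
/*
 * Note on the queue throttling above: this callback is the "consumer"
 * half of a high/low-water scheme whose "producer" half lives in
 * i1480u_xmit_frame(), which stops the queue once tx_inflight.count
 * reaches tx_inflight.max; here we restart it once the count has
 * drained down to tx_inflight.threshold. A minimal sketch of the
 * pattern (the value shown is purely illustrative, not the driver's
 * default):
 *
 *	// producer (submit path)
 *	if (atomic_read(&inflight->count) >= inflight->max)   // e.g. 256
 *		netif_stop_queue(net_dev);
 *
 *	// consumer (completion path)
 *	if (atomic_dec_return(&inflight->count) <= inflight->threshold
 *	    && inflight->threshold != 0     // threshold 0 disables restart
 *	    && netif_queue_stopped(net_dev))
 *		netif_start_queue(net_dev);
 */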


/*
 * Given a buffer that doesn't fit in a single fragment, create a
 * scatter/gather structure for delivery to the USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx: tx descriptor
 * @skb: skb to send
 * @gfp_mask: gfp allocation mask
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer into chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header          \
 *   i1480 tx header     |  fragment 1
 *   fragment data       /
 *   nxt header          \  fragment 2
 *   fragment data       /
 *   ..
 *   ..
 *   last header         \  fragment 3
 *   last fragment data  /
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl;
	size_t pl_size;

	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;
	pl = skb->data;
	pl_itr = pl;
	pl_size = skb->len;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the headers */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have a smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc..
	 */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;

	buf_itr = wtx->buf;	/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;

	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
	}
	dev_kfree_skb_irq(skb);
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
	kfree(wtx->buf);
error_buf_alloc:
	return result;
}
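
/*
 * Worked example of the layout i1480u_tx_create_n() builds (the sizes
 * below are illustrative assumptions, not the real header sizes; see
 * i1480u-wlp.h for the actual structs). Assuming:
 *
 *	i1480u_MAX_FRG_SIZE         = 512
 *	sizeof(struct untd_hdr_1st) = 8
 *	sizeof(struct wlp_tx_hdr)   = 8
 *	sizeof(struct untd_hdr_rst) = 4    => i1480u_MAX_PL_SIZE = 508
 *
 * a 1500 byte skb would be laid out as:
 *
 *	pl_size_1st  = 512 - 8 - 8         = 496
 *	pl_size_left = 1500 - 496          = 1004
 *	frgs         = ceil(1004 / 508)    = 2
 *	buf_size     = 8 + 8 + 2*4 + 1500  = 1524
 *
 * i.e. a first fragment carrying 496 bytes, followed by a NXT fragment
 * with 508 bytes and a LST fragment with the remaining 496 bytes.
 */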


/*
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx: tx descriptor
 * @skb: skb carrying the payload to send
 * @gfp_mask: gfp allocation mask
 *
 * This function does not consume the @skb.
 */
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	wtx->buf = NULL;
	wtx->skb = skb;
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}
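
/*
 * After the two __skb_push() calls in i1480u_tx_create_1() the skb's
 * data area looks like this (header sizes depend on the struct
 * definitions in i1480u-wlp.h):
 *
 *	skb->data --> | untd_hdr_cmp | wlp_tx_hdr | original payload |
 *
 * which is already the complete-packet on-the-wire format, so
 * i1480u_tx_create() can point the bulk URB straight at skb->data
 * with no extra copy (unlike the multi-fragment path, which has to
 * rebuild the buffer).
 */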


/*
 * Given an skb to transmit, massage it to become palatable for the TX pipe
 *
 * This will break the buffer into chunks smaller than
 * i1480u_MAX_FRG_SIZE and add proper headers to each:
 *
 *   1st header          \
 *   i1480 tx header     |  fragment 1
 *   fragment data       /
 *   nxt header          \  fragment 2
 *   fragment data       /
 *   ..
 *   ..
 *   last header         \  fragment 3
 *   last fragment data  /
 *
 * Each fragment will always be smaller than or equal to
 * i1480u_MAX_FRG_SIZE.
 *
 * If the whole payload (plus headers) fits in a single fragment, the
 * following is composed instead:
 *
 *   complete header     \
 *   i1480 tx header     |  single fragment
 *   packet data         /
 *
 * We were going to use s/g support, but because the interface is
 * synchronous and in the end there is plenty of overhead to do it,
 * it didn't seem worth it for data that is going to be smaller than
 * one page.
 */
static
struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
				   struct sk_buff *skb, gfp_t gfp_mask)
{
	int result;
	struct usb_endpoint_descriptor *epd;
	int usb_pipe;
	unsigned long flags;

	struct i1480u_tx *wtx;
	const size_t pl_max_size =
		i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
		- sizeof(struct wlp_tx_hdr);

	wtx = kmalloc(sizeof(*wtx), gfp_mask);
	if (wtx == NULL)
		goto error_wtx_alloc;
	wtx->urb = usb_alloc_urb(0, gfp_mask);
	if (wtx->urb == NULL)
		goto error_urb_alloc;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
	usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
	/* Fits in a single complete packet or need to split? */
	if (skb->len > pl_max_size) {
		result = i1480u_tx_create_n(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
	} else {
		result = i1480u_tx_create_1(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  skb->data, skb->len, i1480u_tx_cb, wtx);
	}
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_add(&wtx->list_node, &i1480u->tx_list);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	return wtx;

error_create:
	usb_free_urb(wtx->urb);
error_urb_alloc:
	kfree(wtx);
error_wtx_alloc:
	return NULL;
}

/*
 * Actual fragmentation and transmission of frame
 *
 * @wlp: WLP substack data structure
 * @skb: To be transmitted
 * @dst: Device address of destination
 * @returns: 0 on success, <0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here that the interface is up
 * before sending anything.
 */
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	static unsigned char dev_bcast[2] = { 0xff, 0xff };

	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;
	result = -EBUSY;
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB, "
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->def_tx_hdr read without
	 * locking. We do so because they are kind of orthogonal to
	 * each other (and thus not changed in an atomic batch).
	 * The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/* Broadcast message directed to DRP host. Send as best
		 * effort on PCA. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr,
						i1480u->options.pca_base_priority);
	}

	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);	/* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of skb to calling function */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	return result;
}


/*
 * Transmit an skb. Called when an skbuff has to be transmitted.
 *
 * The skb is first passed to the WLP substack to ensure this is a valid
 * frame. If valid, the device address of the destination will be filled in
 * and the WLP header prepended to the skb. If this step fails we fake
 * sending the frame; if we returned an error the network stack would just
 * keep trying.
 *
 * Broadcast frames inside a WSS need to be treated specially as multicast
 * is not supported. A broadcast frame is sent as unicast to each member of
 * the WSS - this is done by the WLP substack when it finds a broadcast
 * frame. So, we test if the WLP substack took over the skb and only
 * transmit it if it has not (been taken over).
 *
 * @net_dev->xmit_lock is held
 */
netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *net_dev)
{
	int result;
	struct i1480u *i1480u = netdev_priv(net_dev);
	struct device *dev = &i1480u->usb_iface->dev;
	struct uwb_dev_addr dst;

	if ((net_dev->flags & IFF_UP) == 0)
		goto error;
	result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "WLP verification of TX frame failed (%d). "
			"Dropping packet.\n", result);
		goto error;
	} else if (result == 1) {
		/* trans_start time will be set when WLP actually transmits
		 * the frame */
		goto out;
	}
	result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "Frame TX failed (%d).\n", result);
		goto error;
	}
	return NETDEV_TX_OK;
error:
	dev_kfree_skb_any(skb);
	net_dev->stats.tx_dropped++;
out:
	return NETDEV_TX_OK;
}
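
/*
 * For reference, this entry point is hooked into the network stack
 * through struct net_device_ops in the driver's netdev setup code;
 * a minimal sketch of such a hookup (the ops variable name is an
 * assumption, only the two fields relevant to this file are shown):
 *
 *	static const struct net_device_ops i1480u_netdev_ops = {
 *		.ndo_start_xmit = i1480u_hard_start_xmit,
 *		.ndo_tx_timeout = i1480u_tx_timeout,
 *	};
 */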


/*
 * Called when a packet transmission doesn't complete in a reasonable
 * period. Device reset may sleep - do it outside of interrupt context
 * (delayed).
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}


void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count * 10;	/* wait up to 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list, so we
	 * can wait for it to become empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		BUG_ON(count == 0);
		msleep(20);
	}
}