2 * Intel Wireless WiMAX Connection 2400m
6 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Intel Corporation <linux-wimax@intel.com>
36 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
37 * - Initial implementation
38 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
39 * - Use skb_clone(), break up processing in chunks
40 * - Split transport/device specific
41 * - Make buffer size dynamic to exert less memory pressure
44 * This handles the RX path on USB.
46 * When a notification is received that says 'there is RX data ready',
47 * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
48 * reads a buffer from USB and passes it to i2400m_rx() in the generic
49 * handling code. The RX buffer has a specific format that is
52 * We use a kernel thread in a loop because:
54 * - we want to be able to call the USB power management get/put
55 * functions (blocking) before each transaction.
57 * - We might get a lot of notifications and we don't want to submit
58 * a zillion reads; by serializing, we are throttling.
60 * - RX data processing can get heavy enough so that it is not
61 * appropriate for doing it in the USB callback; thus we run it in a
64 * We provide a read buffer of an arbitrary size (short of a page); if
65 * the callback reports -EOVERFLOW, it means it was too small, so we
66 * just double the size and retry (being careful to append, as
67 * sometimes the device provided some data). Every now and then we
68 * check if the average packet size is smaller than the current packet
69 * size and if so, we halve it. At the end, the size of the
70 * preallocated buffer should be following the average received
71 * transaction size, adapting dynamically to it.
75 * i2400mu_rx_kick() Called from notif.c when we get a
76 * 'data ready' notification
77 * i2400mu_rxd() Kernel RX daemon
78 * i2400mu_rx() Receive USB data
79 * i2400m_rx() Send data to generic i2400m RX handling
81 * i2400mu_rx_setup() called from i2400mu_bus_dev_start()
83 * i2400mu_rx_release() called from i2400mu_bus_dev_stop()
85 #include <linux/workqueue.h>
86 #include <linux/usb.h>
87 #include "i2400m-usb.h"
90 #define D_SUBMODULE rx
91 #include "usb-debug-levels.h"
96 * We can't let the rx_size be a multiple of 512 bytes (the RX
97 * endpoint's max packet size). On some USB host controllers (we
98 * haven't been able to fully characterize which), if the device is
99 * about to send (for example) X bytes and we only post a buffer to
100 * receive n*512, it will fail to mark that as babble (so that
101 * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
104 * So on growing or shrinking, if it is a multiple of the
105 * maxpacketsize, we remove some (instead of increasing some, so in a
106 * buddy allocator we try to waste less space).
108 * Note we also need a hook for this on i2400mu_rx() -- when we do the
109 * first read, we are sure we won't hit this spot because
110 * i2400mu->rx_size has been set properly. However, if we have to
111 * double because of -EOVERFLOW, when we launch the read to get the
112 * rest of the data, we *have* to make sure that also is not a
113 * multiple of the max_pkt_size.
117 size_t i2400mu_rx_size_grow(struct i2400mu
*i2400mu
)
119 struct device
*dev
= &i2400mu
->usb_iface
->dev
;
121 const size_t max_pkt_size
= 512;
123 rx_size
= 2 * i2400mu
->rx_size
;
124 if (rx_size
% max_pkt_size
== 0) {
127 "RX: expected size grew to %zu [adjusted -8] "
129 rx_size
, i2400mu
->rx_size
);
132 "RX: expected size grew to %zu from %zu\n",
133 rx_size
, i2400mu
->rx_size
);
139 void i2400mu_rx_size_maybe_shrink(struct i2400mu
*i2400mu
)
141 const size_t max_pkt_size
= 512;
142 struct device
*dev
= &i2400mu
->usb_iface
->dev
;
144 if (unlikely(i2400mu
->rx_size_cnt
>= 100
145 && i2400mu
->rx_size_auto_shrink
)) {
147 i2400mu
->rx_size_acc
/ i2400mu
->rx_size_cnt
;
148 size_t new_rx_size
= i2400mu
->rx_size
/ 2;
149 if (avg_rx_size
< new_rx_size
) {
150 if (new_rx_size
% max_pkt_size
== 0) {
153 "RX: expected size shrank to %zu "
154 "[adjusted -8] from %zu\n",
155 new_rx_size
, i2400mu
->rx_size
);
158 "RX: expected size shrank to %zu "
160 new_rx_size
, i2400mu
->rx_size
);
161 i2400mu
->rx_size
= new_rx_size
;
162 i2400mu
->rx_size_cnt
= 0;
163 i2400mu
->rx_size_acc
= i2400mu
->rx_size
;
169 * Receive a message with payloads from the USB bus into an skb
171 * @i2400mu: USB device descriptor
172 * @rx_skb: skb where to place the received message
174 * Deals with all the USB-specifics of receiving, dynamically
175 * increasing the buffer size if so needed. Returns the payload in the
176 * skb, ready to process. On a zero-length packet, we retry.
178 * On soft USB errors, we retry (until they become too frequent and
179 * then are promoted to hard); on hard USB errors, we reset the
180 * device. On other errors (skb realloacation, we just drop it and
181 * hope for the next invocation to solve it).
183 * Returns: pointer to the skb if ok, ERR_PTR on error.
184 * NOTE: this function might realloc the skb (if it is too small),
185 * so always update with the one returned.
186 * ERR_PTR() is < 0 on error.
187 * Will return NULL if it cannot reallocate -- this can be
188 * considered a transient retryable error.
191 struct sk_buff
*i2400mu_rx(struct i2400mu
*i2400mu
, struct sk_buff
*rx_skb
)
194 struct device
*dev
= &i2400mu
->usb_iface
->dev
;
195 int usb_pipe
, read_size
, rx_size
, do_autopm
;
196 struct usb_endpoint_descriptor
*epd
;
197 const size_t max_pkt_size
= 512;
199 d_fnstart(4, dev
, "(i2400mu %p)\n", i2400mu
);
200 do_autopm
= atomic_read(&i2400mu
->do_autopm
);
202 usb_autopm_get_interface(i2400mu
->usb_iface
) : 0;
204 dev_err(dev
, "RX: can't get autopm: %d\n", result
);
207 epd
= usb_get_epd(i2400mu
->usb_iface
, i2400mu
->endpoint_cfg
.bulk_in
);
208 usb_pipe
= usb_rcvbulkpipe(i2400mu
->usb_dev
, epd
->bEndpointAddress
);
210 rx_size
= skb_end_pointer(rx_skb
) - rx_skb
->data
- rx_skb
->len
;
211 if (unlikely(rx_size
% max_pkt_size
== 0)) {
213 d_printf(1, dev
, "RX: rx_size adapted to %d [-8]\n", rx_size
);
215 result
= usb_bulk_msg(
216 i2400mu
->usb_dev
, usb_pipe
, rx_skb
->data
+ rx_skb
->len
,
217 rx_size
, &read_size
, 200);
218 usb_mark_last_busy(i2400mu
->usb_dev
);
222 goto retry
; /* ZLP, just resubmit */
223 skb_put(rx_skb
, read_size
);
227 * Stall -- maybe the device is choking with our
228 * requests. Clear it and give it some time. If they
229 * happen to often, it might be another symptom, so we
232 * No error handling for usb_clear_halt(0; if it
233 * works, the retry works; if it fails, this switch
234 * does the error handling for us.
236 if (edc_inc(&i2400mu
->urb_edc
,
237 10 * EDC_MAX_ERRORS
, EDC_ERROR_TIMEFRAME
)) {
238 dev_err(dev
, "BM-CMD: too many stalls in "
239 "URB; resetting device\n");
242 usb_clear_halt(i2400mu
->usb_dev
, usb_pipe
);
243 msleep(10); /* give the device some time */
245 case -EINVAL
: /* while removing driver */
246 case -ENODEV
: /* dev disconnect ... */
247 case -ENOENT
: /* just ignore it */
251 case -EOVERFLOW
: { /* too small, reallocate */
252 struct sk_buff
*new_skb
;
253 rx_size
= i2400mu_rx_size_grow(i2400mu
);
254 if (rx_size
<= (1 << 16)) /* cap it */
255 i2400mu
->rx_size
= rx_size
;
256 else if (printk_ratelimit()) {
257 dev_err(dev
, "BUG? rx_size up to %d\n", rx_size
);
261 skb_put(rx_skb
, read_size
);
262 new_skb
= skb_copy_expand(rx_skb
, 0, rx_size
- rx_skb
->len
,
264 if (new_skb
== NULL
) {
265 if (printk_ratelimit())
266 dev_err(dev
, "RX: Can't reallocate skb to %d; "
267 "RX dropped\n", rx_size
);
270 goto out
; /* drop it...*/
274 i2400mu
->rx_size_cnt
= 0;
275 i2400mu
->rx_size_acc
= i2400mu
->rx_size
;
276 d_printf(1, dev
, "RX: size changed to %d, received %d, "
277 "copied %d, capacity %ld\n",
278 rx_size
, read_size
, rx_skb
->len
,
279 (long) (skb_end_pointer(new_skb
) - new_skb
->head
));
282 /* In most cases, it happens due to the hardware scheduling a
283 * read when there was no data - unfortunately, we have no way
284 * to tell this timeout from a USB timeout. So we just ignore
287 dev_err(dev
, "RX: timeout: %d\n", result
);
290 default: /* Any error */
291 if (edc_inc(&i2400mu
->urb_edc
,
292 EDC_MAX_ERRORS
, EDC_ERROR_TIMEFRAME
))
294 dev_err(dev
, "RX: error receiving URB: %d, retrying\n", result
);
299 usb_autopm_put_interface(i2400mu
->usb_iface
);
300 d_fnend(4, dev
, "(i2400mu %p) = %p\n", i2400mu
, rx_skb
);
304 dev_err(dev
, "RX: maximum errors in URB exceeded; "
305 "resetting device\n");
307 usb_queue_reset_device(i2400mu
->usb_iface
);
308 rx_skb
= ERR_PTR(result
);
314 * Kernel thread for USB reception of data
316 * This thread waits for a kick; once kicked, it will allocate an skb
317 * and receive a single message to it from USB (using
318 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
319 * code for processing.
321 * When done processing, it runs some dirty statistics to verify if
322 * the last 100 messages received were smaller than half of the
323 * current RX buffer size. In that case, the RX buffer size is
324 * halved. This helps lower the pressure on the memory
327 * Hard errors force the thread to exit.
330 int i2400mu_rxd(void *_i2400mu
)
333 struct i2400mu
*i2400mu
= _i2400mu
;
334 struct i2400m
*i2400m
= &i2400mu
->i2400m
;
335 struct device
*dev
= &i2400mu
->usb_iface
->dev
;
336 struct net_device
*net_dev
= i2400m
->wimax_dev
.net_dev
;
339 struct sk_buff
*rx_skb
;
342 d_fnstart(4, dev
, "(i2400mu %p)\n", i2400mu
);
343 spin_lock_irqsave(&i2400m
->rx_lock
, flags
);
344 BUG_ON(i2400mu
->rx_kthread
!= NULL
);
345 i2400mu
->rx_kthread
= current
;
346 spin_unlock_irqrestore(&i2400m
->rx_lock
, flags
);
348 d_printf(2, dev
, "RX: waiting for messages\n");
350 wait_event_interruptible(
352 (kthread_should_stop() /* check this first! */
353 || (pending
= atomic_read(&i2400mu
->rx_pending_count
)))
355 if (kthread_should_stop())
359 rx_size
= i2400mu
->rx_size
;
360 d_printf(2, dev
, "RX: reading up to %d bytes\n", rx_size
);
361 rx_skb
= __netdev_alloc_skb(net_dev
, rx_size
, GFP_KERNEL
);
362 if (rx_skb
== NULL
) {
363 dev_err(dev
, "RX: can't allocate skb [%d bytes]\n",
365 msleep(50); /* give it some time? */
369 /* Receive the message with the payloads */
370 rx_skb
= i2400mu_rx(i2400mu
, rx_skb
);
371 result
= PTR_ERR(rx_skb
);
374 atomic_dec(&i2400mu
->rx_pending_count
);
375 if (rx_skb
== NULL
|| rx_skb
->len
== 0) {
376 /* some "ignorable" condition */
381 /* Deliver the message to the generic i2400m code */
382 i2400mu
->rx_size_cnt
++;
383 i2400mu
->rx_size_acc
+= rx_skb
->len
;
384 result
= i2400m_rx(i2400m
, rx_skb
);
386 && edc_inc(&i2400mu
->urb_edc
,
387 EDC_MAX_ERRORS
, EDC_ERROR_TIMEFRAME
)) {
391 /* Maybe adjust RX buffer size */
392 i2400mu_rx_size_maybe_shrink(i2400mu
);
396 spin_lock_irqsave(&i2400m
->rx_lock
, flags
);
397 i2400mu
->rx_kthread
= NULL
;
398 spin_unlock_irqrestore(&i2400m
->rx_lock
, flags
);
399 d_fnend(4, dev
, "(i2400mu %p) = %d\n", i2400mu
, result
);
403 dev_err(dev
, "RX: maximum errors in received buffer exceeded; "
404 "resetting device\n");
405 usb_queue_reset_device(i2400mu
->usb_iface
);
411 * Start reading from the device
413 * @i2400m: device instance
415 * Notify the RX thread that there is data pending.
417 void i2400mu_rx_kick(struct i2400mu
*i2400mu
)
419 struct i2400m
*i2400m
= &i2400mu
->i2400m
;
420 struct device
*dev
= &i2400mu
->usb_iface
->dev
;
422 d_fnstart(3, dev
, "(i2400mu %p)\n", i2400m
);
423 atomic_inc(&i2400mu
->rx_pending_count
);
424 wake_up_all(&i2400mu
->rx_wq
);
425 d_fnend(3, dev
, "(i2400m %p) = void\n", i2400m
);
429 int i2400mu_rx_setup(struct i2400mu
*i2400mu
)
432 struct i2400m
*i2400m
= &i2400mu
->i2400m
;
433 struct device
*dev
= &i2400mu
->usb_iface
->dev
;
434 struct wimax_dev
*wimax_dev
= &i2400m
->wimax_dev
;
435 struct task_struct
*kthread
;
437 kthread
= kthread_run(i2400mu_rxd
, i2400mu
, "%s-rx",
439 /* the kthread function sets i2400mu->rx_thread */
440 if (IS_ERR(kthread
)) {
441 result
= PTR_ERR(kthread
);
442 dev_err(dev
, "RX: cannot start thread: %d\n", result
);
448 void i2400mu_rx_release(struct i2400mu
*i2400mu
)
451 struct i2400m
*i2400m
= &i2400mu
->i2400m
;
452 struct device
*dev
= i2400m_dev(i2400m
);
453 struct task_struct
*kthread
;
455 spin_lock_irqsave(&i2400m
->rx_lock
, flags
);
456 kthread
= i2400mu
->rx_kthread
;
457 i2400mu
->rx_kthread
= NULL
;
458 spin_unlock_irqrestore(&i2400m
->rx_lock
, flags
);
460 kthread_stop(kthread
);
462 d_printf(1, dev
, "RX: kthread had already exited\n");