drivers/usb/wusbcore/wa-xfer.c
/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * How transfers work: get a buffer, break it up in segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, but it is a pain to implement.
 *
 *
 * ENTRY POINTS
 *
 * FIXME
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 * FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 * Warned you are; it's my second try and still not happy with it.
 *
 * NOTES:
 *
 * - No iso
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and then it's there, assigned, until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways it could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means unused) or when a transfer ends. Reset the
 *       timer when a xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()].
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *       when none are found, go over the list, check each endpoint
 *       and its activity record; if there is no last-xfer-done
 *       timestamp in the last x seconds, take it
 *
 *   However, due to the fact that we have a set of limited
 *   resources (max-segments-at-the-same-time per xfer,
 *   xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *   we are going to have to rebuild all this based on a scheduler,
 *   where we have a list of transactions to do and, based on the
 *   availability of the different required components (blocks,
 *   rpipes, segment slots, etc), we go scheduling them. Painful.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
	WA_SEGS_MAX = 255,
};
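
/*
 * Illustrative sketch, not part of the driver proper: the segment
 * math the header comment above describes. A transfer buffer is
 * split into ceil(len / seg_size) chunks; the real computation,
 * including rounding seg_size down to a multiple of maxpktsize,
 * lives in __wa_xfer_setup_sizes() below. The helper name is made
 * up for illustration and is not called anywhere.
 */
static inline unsigned wa_example_seg_count(size_t buf_len, size_t seg_size)
{
	/* round up: e.g. a 5000 byte buffer with 2048 byte segments -> 3 */
	return (buf_len + seg_size - 1) / seg_size;
}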

enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb urb;
	struct urb *dto_urb;		/* for data output? */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
};

static void wa_seg_init(struct wa_seg *seg)
{
	/* usb_init_urb() repeats a lot of work, so we do it here */
	kref_init(&seg->urb.kref);
}

/*
 * Protected by xfer->lock
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 * so we need to put them, not free them.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			/* dto_urb only exists for outbound transfers */
			if (!xfer->is_inbound)
				usb_put_urb(xfer->seg[cnt]->dto_urb);
			usb_put_urb(&xfer->seg[cnt]->urb);
		}
	}
	kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
					xfer, cnt, seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, seg->index, found_short, seg->result,
				urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
				xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
				xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
				 xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/*
 * Return the ID associated with this transfer
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}

/*
 * Search for a transfer by ID on the wire adapter's transfer list
 *
 * (IDs are sequential, see wa_xfer_id_init() above)
 *
 * @returns NULL if not found; otherwise the xfer is referenced.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			goto out;
		}
	}
	xfer_itr = NULL;
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer_itr;
}

struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfree'd.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
			  usb_sndbulkpipe(xfer->wa->usb_dev,
					  xfer->wa->dto_epd->bEndpointAddress),
			  &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;
}

/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])... not really much of
	 * a check (FIXME) */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
		/ xfer->seg_size;
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	return result;
}
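
/*
 * Worked example of the sizing above, with made-up descriptor values:
 * wBlocks = 4 and bRPipeBlockSize = 10 give a raw seg_size of
 * 4 * 2^(10-1) = 2048 bytes; with wMaxPacketSize = 512 that is already
 * a multiple, so a 5000 byte URB yields (5000 + 2048 - 1) / 2048 = 3
 * segments. The numbers are illustrative only, not from a real device.
 */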

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
		       && xfer->urb->setup_packet == NULL);
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
		/* fall through */
	default:
		BUG();
	}
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access the transfer until we are sure that it hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kzalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->urb, usb_dev,
				  usb_sndbulkpipe(usb_dev,
						  dto_epd->bEndpointAddress),
				  &seg->xfer_hdr, xfer_hdr_size,
				  wa_seg_cb, seg);
		buf_itr_size = buf_size > xfer->seg_size ?
			xfer->seg_size : buf_size;
		if (xfer->is_inbound == 0 && buf_size > 0) {
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);
			if (xfer->is_dma) {
				seg->dto_urb->transfer_dma =
					xfer->urb->transfer_dma + buf_itr;
				seg->dto_urb->transfer_flags |=
					URB_NO_TRANSFER_DMA_MAP;
			} else
				seg->dto_urb->transfer_buffer =
					xfer->urb->transfer_buffer + buf_itr;
			seg->dto_urb->transfer_buffer_length = buf_itr_size;
		}
		seg->status = WA_SEG_READY;
		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;
	}
	return 0;

error_dto_alloc:
	kfree(xfer->seg[cnt]);
error_seg_kzalloc:
	/* use the fact that cnt is left at where it failed; everything
	 * below index cnt is fully set up and has to be released */
	while (cnt-- > 0) {
		if (xfer->is_inbound == 0)
			usb_put_urb(xfer->seg[cnt]->dto_urb);
		kfree(xfer->seg[cnt]);
	}
error_segs_kzalloc:
	return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer into a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 * need to do two for loops when we could run everything in a
 * single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0;	/* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;
	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
		cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
	transfer_size -= xfer->seg_size;
	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
		xfer_hdr->bTransferSegment = cnt;
		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size)
			: cpu_to_le32(transfer_size);
		xfer->seg[cnt]->status = WA_SEG_READY;
		transfer_size -= xfer->seg_size;
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}

/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;
	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
	if (result < 0) {
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	}
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
			       xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(&seg->urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}

/*
 * Execute more queued request segments until the maximum concurrency
 * allowed is reached
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	       && !list_empty(&rpipe->seg_list)) {
		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}

/*
 * xfer->lock is taken
 *
 * On failure to submit we just stop submitting and return an error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0) {
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
			}
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return result;
}

/*
 * Second part of a URB/transfer enqueue
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *		delayed == 0
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0)
		goto error_rpipe_get;
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS)
		goto error_dequeued;

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0)
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
	if (result < 0)
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);
	return;

	/* this is basically wa_xfer_completion() broken up.
	 * wa_xfer_giveback() does a wa_xfer_put() that will call
	 * wa_xfer_destroy() and clean up/undo setup().
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	wa_xfer_giveback(xfer);
	return;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 * and then checks the list -- so as we would be acquiring locks in
 * inverse order, we just drop the lock once we have the xfer and
 * reacquire it later.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;

	spin_lock_irq(&wa->xfer_list_lock);
	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
				 list_node) {
		list_del_init(&xfer->list_node);
		spin_unlock_irq(&wa->xfer_list_lock);

		urb = xfer->urb;
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */

		spin_lock_irq(&wa->xfer_list_lock);
	}
	spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call; otherwise we
 * call it directly.
 *
 * @urb: We own a reference to it done by the Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if (urb->transfer_buffer == NULL
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_work);
	} else {
		wa_urb_enqueue_b(xfer);
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
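
/*
 * Hypothetical caller sketch, not part of this file: the real entry
 * points live in the HWA/DWA HCD glue. An hc_driver urb_enqueue op
 * would forward to wa_urb_enqueue() roughly along these lines, taking
 * the endpoint from the URB. The function name here is made up for
 * illustration and is not called anywhere.
 */
static inline int wa_example_hcd_enqueue(struct wahc *wa, struct urb *urb,
					 gfp_t gfp)
{
	return wa_urb_enqueue(wa, urb->ep, urb, gfp);
}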

/*
 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 * not setup  If there is no hcpriv yet, that means that enqueue
 *            still had no time to set the xfer up. Because
 *            urb->status should be other than -EINPROGRESS,
 *            enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt;
	unsigned rpipe_ready = 0;

	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/* Nothing setup yet; enqueue will see urb->status !=
		 * -EINPROGRESS (by hcd layer) and bail out with
		 * error, no need to do completion
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_SUBMITTED:
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DTI_PENDING:
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	return 0;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);

/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * codes
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s(): BUG? "
			       "Unknown WA transfer status 0x%02x\n",
			       __func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s(): BUG? "
			       "Inconsistent WA status: 0x%02x\n",
			       __func__, real_status);
		errno = -errno;
	}
	return errno;
}
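
/*
 * Example of the translation above (illustrative only, not asserting
 * the numeric enum values): a bTransferStatus of 0x81 is masked with
 * 0x3f to drop the error/warning bits before indexing xlat[]; whatever
 * negative errno sits in that slot is returned as-is, while the slots
 * deliberately left positive (EINVAL) mark internal inconsistencies
 * and are logged and negated before being returned.
 */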

/*
 * Process an xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up in parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
			xfer, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
		} else {
			wa->buf_in_urb->transfer_buffer =
				xfer->urb->transfer_buffer
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	__wa_xfer_abort(xfer);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}

/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access the transfer until we are sure that it hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not an xfer_result, we complain and
 * repost the DTI-URB. If it is an xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have a xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_ABORTED
		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed things up, we always have a URB reading the DTI endpoint;
 * we don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}