drivers/usb/host/xhci-ring.c
1 /*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23 /*
24 * Ring initialization rules:
25 * 1. Each segment is initialized to zero, except for link TRBs.
26 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
27 * Consumer Cycle State (CCS), depending on ring function.
28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
29 *
30 * Ring behavior rules:
31 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
32 * least one free TRB in the ring. This is useful if you want to turn that
33 * into a link TRB and expand the ring.
34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
35 * link TRB, then load the pointer with the address in the link TRB. If the
36 * link TRB had its toggle bit set, you may need to update the ring cycle
37 * state (see cycle bit rules). You may have to do this multiple times
38 * until you reach a non-link TRB.
39 * 3. A ring is full if enqueue++ (for the definition of increment above)
40 * equals the dequeue pointer.
41 *
42 * Cycle bit rules:
43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
44 * in a link TRB, it must toggle the ring cycle state.
45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
46 * in a link TRB, it must toggle the ring cycle state.
47 *
48 * Producer rules:
49 * 1. Check if ring is full before you enqueue.
50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
51 * Update enqueue pointer between each write (which may update the ring
52 * cycle state).
53 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
54 * and endpoint rings. If HC is the producer for the event ring,
55 * it generates an interrupt according to interrupt modulation rules.
56 *
57 * Consumer rules:
58 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
59 * the TRB is owned by the consumer.
60 * 2. Update dequeue pointer (which may update the ring cycle state) and
61 * continue processing TRBs until you reach a TRB which is not owned by you.
62 * 3. Notify the producer. SW is the consumer for the event ring, and it
63 * updates event ring dequeue pointer. HC is the consumer for the command and
64 * endpoint rings; it generates events on the event ring for these.
65 */
66
67 #include <linux/scatterlist.h>
68 #include <linux/slab.h>
69 #include "xhci.h"
70
71 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
72 struct xhci_virt_device *virt_dev,
73 struct xhci_event_cmd *event);
74
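/*
 * Illustrative sketch, not part of the driver: a hypothetical helper
 * (example_trb_owned_by_consumer is not called anywhere in this file)
 * showing the consumer-side ownership test from the "Cycle bit rules"
 * and "Consumer rules" comments above. A TRB at the dequeue pointer
 * belongs to the consumer only while its cycle bit matches the ring's
 * cycle state; this is the same test the event handler applies before
 * processing an event TRB.
 */
static inline bool example_trb_owned_by_consumer(struct xhci_ring *ring)
{
	u32 control = le32_to_cpu(ring->dequeue->generic.field[3]);

	/* TRB_CYCLE is bit 0, and ring->cycle_state is always 0 or 1 */
	return (control & TRB_CYCLE) == ring->cycle_state;
}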
75 /*
76 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
77 * address of the TRB.
78 */
79 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
80 union xhci_trb *trb)
81 {
82 unsigned long segment_offset;
83
84 if (!seg || !trb || trb < seg->trbs)
85 return 0;
86 /* offset in TRBs */
87 segment_offset = trb - seg->trbs;
88 if (segment_offset >= TRBS_PER_SEGMENT)
89 return 0;
90 return seg->dma + (segment_offset * sizeof(*trb));
91 }
92
93 /* Does this link TRB point to the first segment in a ring,
94 * or was the previous TRB the last TRB on the last segment in the ERST?
95 */
96 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
97 struct xhci_segment *seg, union xhci_trb *trb)
98 {
99 if (ring == xhci->event_ring)
100 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
101 (seg->next == xhci->event_ring->first_seg);
102 else
103 return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
104 }
105
106 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
107 * segment? I.e. would the updated event TRB pointer step off the end of the
108 * event seg?
109 */
110 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
111 struct xhci_segment *seg, union xhci_trb *trb)
112 {
113 if (ring == xhci->event_ring)
114 return trb == &seg->trbs[TRBS_PER_SEGMENT];
115 else
116 return TRB_TYPE_LINK_LE32(trb->link.control);
117 }
118
119 static int enqueue_is_link_trb(struct xhci_ring *ring)
120 {
121 struct xhci_link_trb *link = &ring->enqueue->link;
122 return TRB_TYPE_LINK_LE32(link->control);
123 }
124
125 union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
126 {
127 /* Enqueue pointer can be left pointing to the link TRB,
128 * we must handle that
129 */
130 if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
131 return ring->enq_seg->next->trbs;
132 return ring->enqueue;
133 }
134
135 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
136 * TRB is in a new segment. This does not skip over link TRBs, and it does not
137 * affect the ring dequeue or enqueue pointers.
138 */
139 static void next_trb(struct xhci_hcd *xhci,
140 struct xhci_ring *ring,
141 struct xhci_segment **seg,
142 union xhci_trb **trb)
143 {
144 if (last_trb(xhci, ring, *seg, *trb)) {
145 *seg = (*seg)->next;
146 *trb = ((*seg)->trbs);
147 } else {
148 (*trb)++;
149 }
150 }
151
152 /*
153 * See Cycle bit rules. SW is the consumer for the event ring only.
154 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
155 */
156 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
157 {
158 unsigned long long addr;
159
160 ring->deq_updates++;
161
162 /*
163 * If this is not an event ring, and the dequeue pointer
164 * is not on a link TRB, there is one more usable TRB
165 */
166 if (ring->type != TYPE_EVENT &&
167 !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
168 ring->num_trbs_free++;
169
170 do {
171 /*
172 * Update the dequeue pointer further if that was a link TRB or
173 * we're at the end of an event ring segment (which doesn't have
174 * link TRBs)
175 */
176 if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
177 if (ring->type == TYPE_EVENT &&
178 last_trb_on_last_seg(xhci, ring,
179 ring->deq_seg, ring->dequeue)) {
180 ring->cycle_state = (ring->cycle_state ? 0 : 1);
181 }
182 ring->deq_seg = ring->deq_seg->next;
183 ring->dequeue = ring->deq_seg->trbs;
184 } else {
185 ring->dequeue++;
186 }
187 } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
188
189 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
190 }
191
192 /*
193 * See Cycle bit rules. SW is the consumer for the event ring only.
194 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
195 *
196 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
197 * chain bit is set), then set the chain bit in all the following link TRBs.
198 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
199 * have their chain bit cleared (so that each Link TRB is a separate TD).
200 *
201 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
202 * set, but other sections talk about dealing with the chain bit set. This was
203 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
204 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
205 *
206 * @more_trbs_coming: Will you enqueue more TRBs before calling
207 * prepare_transfer()?
208 */
209 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
210 bool more_trbs_coming)
211 {
212 u32 chain;
213 union xhci_trb *next;
214 unsigned long long addr;
215
216 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
217 /* If this is not an event ring, there is one less usable TRB */
218 if (ring->type != TYPE_EVENT &&
219 !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
220 ring->num_trbs_free--;
221 next = ++(ring->enqueue);
222
223 ring->enq_updates++;
224 /* Update the enqueue pointer further if that was a link TRB or we're at
225 * the end of an event ring segment (which doesn't have link TRBs)
226 */
227 while (last_trb(xhci, ring, ring->enq_seg, next)) {
228 if (ring->type != TYPE_EVENT) {
229 /*
230 * If the caller doesn't plan on enqueueing more
231 * TDs before ringing the doorbell, then we
232 * don't want to give the link TRB to the
233 * hardware just yet. We'll give the link TRB
234 * back in prepare_ring() just before we enqueue
235 * the TD at the top of the ring.
236 */
237 if (!chain && !more_trbs_coming)
238 break;
239
240 /* If we're not dealing with 0.95 hardware or
241 * isoc rings on AMD 0.96 host,
242 * carry over the chain bit of the previous TRB
243 * (which may mean the chain bit is cleared).
244 */
245 #ifdef CONFIG_MTK_XHCI
246 if (!xhci_link_trb_quirk(xhci)) {
247 #else
248 if (!(ring->type == TYPE_ISOC &&
249 (xhci->quirks & XHCI_AMD_0x96_HOST))
250 && !xhci_link_trb_quirk(xhci)) {
251 #endif
252 next->link.control &=
253 cpu_to_le32(~TRB_CHAIN);
254 next->link.control |=
255 cpu_to_le32(chain);
256 }
257 /* Give this link TRB to the hardware */
258 wmb();
259 next->link.control ^= cpu_to_le32(TRB_CYCLE);
260
261 /* Toggle the cycle bit after the last ring segment. */
262 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
263 ring->cycle_state = (ring->cycle_state ? 0 : 1);
264 }
265 }
266 ring->enq_seg = ring->enq_seg->next;
267 ring->enqueue = ring->enq_seg->trbs;
268 next = ring->enqueue;
269 }
270 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
271 }
272
273 /*
274 * Check to see if there's room to enqueue num_trbs on the ring and make sure
275 * enqueue pointer will not advance into dequeue segment. See rules above.
276 */
277 static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
278 unsigned int num_trbs)
279 {
280 #ifndef CONFIG_MTK_XHCI
281 int num_trbs_in_deq_seg;
282 #endif
283
284 if (ring->num_trbs_free < num_trbs)
285 return 0;
286
287 #ifndef CONFIG_MTK_XHCI
288 if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
289 num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
290 if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
291 return 0;
292 }
293 #endif
294
295 return 1;
296 }
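/*
 * Illustrative sketch, not part of the driver: a hypothetical helper
 * (example_transfer_ring_has_room is not called anywhere in this file)
 * restating the transfer-ring check above in isolation. Besides having
 * num_trbs free TRBs overall, the enqueue pointer must not be able to
 * advance into the dequeue segment, so the TRBs between the start of
 * the dequeue segment and the dequeue pointer count as unusable too.
 */
static inline bool example_transfer_ring_has_room(unsigned int num_trbs_free,
		unsigned int num_trbs, unsigned int num_trbs_in_deq_seg)
{
	return num_trbs_free >= num_trbs + num_trbs_in_deq_seg;
}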
297
298 /* Ring the host controller doorbell after placing a command on the ring */
299 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
300 {
301 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
302 return;
303
304 xhci_dbg(xhci, "// Ding dong!\n");
305 xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
306 /* Flush PCI posted writes */
307 xhci_readl(xhci, &xhci->dba->doorbell[0]);
308 }
309
310 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
311 {
312 u64 temp_64;
313 int ret;
314
315 xhci_dbg(xhci, "Abort command ring\n");
316
317 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
318 xhci_dbg(xhci, "The command ring isn't running, "
319 "Have the command ring been stopped?\n");
320 return 0;
321 }
322
323 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
324 if (!(temp_64 & CMD_RING_RUNNING)) {
325 xhci_dbg(xhci, "Command ring had been stopped\n");
326 return 0;
327 }
328 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
329 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
330 &xhci->op_regs->cmd_ring);
331
332 /* Section 4.6.1.2 of xHCI 1.0 spec says software should
333 * time the completion of all xHCI commands, including
334 * the Command Abort operation. If software doesn't see
335 * CRR negated in a timely manner (e.g. longer than 5
336 * seconds), then it should assume that there are
337 * larger problems with the xHC and assert HCRST.
338 */
339 ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
340 CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
341 if (ret < 0) {
342 xhci_err(xhci, "Stopped the command ring failed, "
343 "maybe the host is dead\n");
344 xhci->xhc_state |= XHCI_STATE_DYING;
345 xhci_quiesce(xhci);
346 xhci_halt(xhci);
347 return -ESHUTDOWN;
348 }
349
350 return 0;
351 }
352
353 static int xhci_queue_cd(struct xhci_hcd *xhci,
354 struct xhci_command *command,
355 union xhci_trb *cmd_trb)
356 {
357 struct xhci_cd *cd;
358 cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
359 if (!cd)
360 return -ENOMEM;
361 INIT_LIST_HEAD(&cd->cancel_cmd_list);
362
363 cd->command = command;
364 cd->cmd_trb = cmd_trb;
365 list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
366
367 return 0;
368 }
369
370 /*
371 * Cancel a command that has been issued.
372 *
373 * Some commands may hang while waiting for acknowledgement from a
374 * USB device. That is outside of the xHC's ability to control and
375 * will leave the command ring blocked. When this occurs, software
376 * should intervene to recover the command ring.
377 * See Sections 4.6.1.1 and 4.6.1.2
378 */
379 int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
380 union xhci_trb *cmd_trb)
381 {
382 int retval = 0;
383 unsigned long flags;
384
385 spin_lock_irqsave(&xhci->lock, flags);
386
387 if (xhci->xhc_state & XHCI_STATE_DYING) {
388 xhci_warn(xhci, "Abort the command ring,"
389 " but the xHCI is dead.\n");
390 retval = -ESHUTDOWN;
391 goto fail;
392 }
393
394 /* queue the command descriptor to cancel_cmd_list */
395 retval = xhci_queue_cd(xhci, command, cmd_trb);
396 if (retval) {
397 xhci_warn(xhci, "Queuing command descriptor failed.\n");
398 goto fail;
399 }
400
401 /* abort command ring */
402 retval = xhci_abort_cmd_ring(xhci);
403 if (retval) {
404 xhci_err(xhci, "Abort command ring failed\n");
405 if (unlikely(retval == -ESHUTDOWN)) {
406 spin_unlock_irqrestore(&xhci->lock, flags);
407 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
408 xhci_dbg(xhci, "xHCI host controller is dead.\n");
409 return retval;
410 }
411 }
412
413 fail:
414 spin_unlock_irqrestore(&xhci->lock, flags);
415 return retval;
416 }
417
418 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
419 unsigned int slot_id,
420 unsigned int ep_index,
421 unsigned int stream_id)
422 {
423 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
424 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
425 unsigned int ep_state = ep->ep_state;
426
427 /* Don't ring the doorbell for this endpoint if there are pending
428 * cancellations because we don't want to interrupt processing.
429 * We don't want to restart any stream rings if there's a set dequeue
430 * pointer command pending because the device can choose to start any
431 * stream once the endpoint is on the HW schedule.
432 * FIXME - check all the stream rings for pending cancellations.
433 */
434 if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
435 (ep_state & EP_HALTED))
436 return;
437 xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
438 /* The CPU has better things to do at this point than wait for a
439 * write-posting flush. It'll get there soon enough.
440 */
441 }
442
443 /* Ring the doorbell for any rings with pending URBs */
444 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
445 unsigned int slot_id,
446 unsigned int ep_index)
447 {
448 unsigned int stream_id;
449 struct xhci_virt_ep *ep;
450
451 ep = &xhci->devs[slot_id]->eps[ep_index];
452
453 /* A ring has pending URBs if its TD list is not empty */
454 if (!(ep->ep_state & EP_HAS_STREAMS)) {
455 if (ep->ring && !(list_empty(&ep->ring->td_list)))
456 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
457 return;
458 }
459
460 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
461 stream_id++) {
462 struct xhci_stream_info *stream_info = ep->stream_info;
463 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
464 xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
465 stream_id);
466 }
467 }
468
469 /*
470 * Find the segment that trb is in. Start searching in start_seg.
471 * If we must move past a segment that has a link TRB with a toggle cycle state
472 * bit set, then we will toggle the value pointed at by cycle_state.
473 */
474 static struct xhci_segment *find_trb_seg(
475 struct xhci_segment *start_seg,
476 union xhci_trb *trb, int *cycle_state)
477 {
478 struct xhci_segment *cur_seg = start_seg;
479 struct xhci_generic_trb *generic_trb;
480
481 while (cur_seg->trbs > trb ||
482 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
483 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
484 if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
485 *cycle_state ^= 0x1;
486 cur_seg = cur_seg->next;
487 if (cur_seg == start_seg)
488 /* Looped over the entire list. Oops! */
489 return NULL;
490 }
491 return cur_seg;
492 }
493
494
495 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
496 unsigned int slot_id, unsigned int ep_index,
497 unsigned int stream_id)
498 {
499 struct xhci_virt_ep *ep;
500
501 ep = &xhci->devs[slot_id]->eps[ep_index];
502 /* Common case: no streams */
503 if (!(ep->ep_state & EP_HAS_STREAMS))
504 return ep->ring;
505
506 if (stream_id == 0) {
507 xhci_warn(xhci,
508 "WARN: Slot ID %u, ep index %u has streams, "
509 "but URB has no stream ID.\n",
510 slot_id, ep_index);
511 return NULL;
512 }
513
514 if (stream_id < ep->stream_info->num_streams)
515 return ep->stream_info->stream_rings[stream_id];
516
517 xhci_warn(xhci,
518 "WARN: Slot ID %u, ep index %u has "
519 "stream IDs 1 to %u allocated, "
520 "but stream ID %u is requested.\n",
521 slot_id, ep_index,
522 ep->stream_info->num_streams - 1,
523 stream_id);
524 return NULL;
525 }
526
527 /* Get the right ring for the given URB.
528 * If the endpoint supports streams, boundary check the URB's stream ID.
529 * If the endpoint doesn't support streams, return the singular endpoint ring.
530 */
531 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
532 struct urb *urb)
533 {
534 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
535 xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
536 }
537
538 /*
539 * Move the xHC's endpoint ring dequeue pointer past cur_td.
540 * Record the new state of the xHC's endpoint ring dequeue segment,
541 * dequeue pointer, and new consumer cycle state in state.
542 * Update our internal representation of the ring's dequeue pointer.
543 *
544 * We do this in three jumps:
545 * - First we update our new ring state to be the same as when the xHC stopped.
546 * - Then we traverse the ring to find the segment that contains
547 * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
548 * any link TRBs with the toggle cycle bit set.
549 * - Finally we move the dequeue state one TRB further, toggling the cycle bit
550 * if we've moved it past a link TRB with the toggle cycle bit set.
551 *
552 * Some of the uses of xhci_generic_trb are grotty, but if they're done
553 * with correct __le32 accesses they should work fine. Only users of this are
554 * in here.
555 */
556 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
557 unsigned int slot_id, unsigned int ep_index,
558 unsigned int stream_id, struct xhci_td *cur_td,
559 struct xhci_dequeue_state *state)
560 {
561 struct xhci_virt_device *dev = xhci->devs[slot_id];
562 struct xhci_ring *ep_ring;
563 struct xhci_generic_trb *trb;
564 struct xhci_ep_ctx *ep_ctx;
565 dma_addr_t addr;
566
567 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
568 ep_index, stream_id);
569 if (!ep_ring) {
570 xhci_warn(xhci, "WARN can't find new dequeue state "
571 "for invalid stream ID %u.\n",
572 stream_id);
573 return;
574 }
575 state->new_cycle_state = 0;
576 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
577 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
578 dev->eps[ep_index].stopped_trb,
579 &state->new_cycle_state);
580 if (!state->new_deq_seg) {
581 WARN_ON(1);
582 return;
583 }
584
585 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
586 xhci_dbg(xhci, "Finding endpoint context\n");
587 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
588 state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
589
590 state->new_deq_ptr = cur_td->last_trb;
591 xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
592 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
593 state->new_deq_ptr,
594 &state->new_cycle_state);
595 if (!state->new_deq_seg) {
596 WARN_ON(1);
597 return;
598 }
599
600 trb = &state->new_deq_ptr->generic;
601 if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
602 (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
603 state->new_cycle_state ^= 0x1;
604 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
605
606 /*
607 * If there is only one segment in a ring, find_trb_seg()'s while loop
608 * will not run, and it will return before it has a chance to see if it
609 * needs to toggle the cycle bit. It can't tell if the stalled transfer
610 * ended just before the link TRB on a one-segment ring, or if the TD
611 * wrapped around the top of the ring, because it doesn't have the TD in
612 * question. Look for the one-segment case where the stalled TRB's address
613 * is greater than the new dequeue pointer address.
614 */
615 if (ep_ring->first_seg == ep_ring->first_seg->next &&
616 state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
617 state->new_cycle_state ^= 0x1;
618 xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
619
620 /* Don't update the ring cycle state for the producer (us). */
621 xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
622 state->new_deq_seg);
623 addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
624 xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
625 (unsigned long long) addr);
626 }
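/*
 * Illustrative sketch, not part of the driver: a hypothetical helper
 * (example_cycle_after_link_trb is not called anywhere in this file)
 * showing the toggle rule applied repeatedly in the function above.
 * Whenever the new dequeue pointer moves through a link TRB that has
 * LINK_TOGGLE set, the consumer cycle state must be flipped so it still
 * matches the TRBs the hardware will see on the far side of the link.
 */
static inline u32 example_cycle_after_link_trb(u32 cycle_state,
		__le32 link_control)
{
	if (link_control & cpu_to_le32(LINK_TOGGLE))
		cycle_state ^= 0x1;
	return cycle_state;
}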
627
628 /* flip_cycle means flip the cycle bit of all but the first and last TRB.
629 * (The last TRB actually points to the ring enqueue pointer, which is not part
630 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
631 */
632 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
633 struct xhci_td *cur_td, bool flip_cycle)
634 {
635 struct xhci_segment *cur_seg;
636 union xhci_trb *cur_trb;
637
638 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
639 true;
640 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
641 if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
642 /* Unchain any chained Link TRBs, but
643 * leave the pointers intact.
644 */
645 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
646 /* Flip the cycle bit (link TRBs can't be the first
647 * or last TRB).
648 */
649 if (flip_cycle)
650 cur_trb->generic.field[3] ^=
651 cpu_to_le32(TRB_CYCLE);
652 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
653 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
654 "in seg %p (0x%llx dma)\n",
655 cur_trb,
656 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
657 cur_seg,
658 (unsigned long long)cur_seg->dma);
659 } else {
660 cur_trb->generic.field[0] = 0;
661 cur_trb->generic.field[1] = 0;
662 cur_trb->generic.field[2] = 0;
663 /* Preserve only the cycle bit of this TRB */
664 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
665 /* Flip the cycle bit except on the first or last TRB */
666 if (flip_cycle && cur_trb != cur_td->first_trb &&
667 cur_trb != cur_td->last_trb)
668 cur_trb->generic.field[3] ^=
669 cpu_to_le32(TRB_CYCLE);
670 cur_trb->generic.field[3] |= cpu_to_le32(
671 TRB_TYPE(TRB_TR_NOOP));
672 xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
673 (unsigned long long)
674 xhci_trb_virt_to_dma(cur_seg, cur_trb));
675 }
676 if (cur_trb == cur_td->last_trb)
677 break;
678 }
679 }
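/*
 * Illustrative sketch, not part of the driver: a hypothetical helper
 * (example_trb_to_noop is not called anywhere in this file) showing the
 * field[3] surgery td_to_noop() performs on a single non-link TRB.
 * Everything except the cycle bit is cleared, and the TRB type is
 * rewritten to No-op, so the hardware skips the TRB without changing
 * ring ownership.
 */
static inline void example_trb_to_noop(union xhci_trb *trb)
{
	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	/* Preserve only the cycle bit, then stamp in the No-op TRB type */
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
}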
680
681 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
682 unsigned int ep_index, unsigned int stream_id,
683 struct xhci_segment *deq_seg,
684 union xhci_trb *deq_ptr, u32 cycle_state);
685
686 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
687 unsigned int slot_id, unsigned int ep_index,
688 unsigned int stream_id,
689 struct xhci_dequeue_state *deq_state)
690 {
691 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
692
693 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
694 "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
695 deq_state->new_deq_seg,
696 (unsigned long long)deq_state->new_deq_seg->dma,
697 deq_state->new_deq_ptr,
698 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
699 deq_state->new_cycle_state);
700 queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
701 deq_state->new_deq_seg,
702 deq_state->new_deq_ptr,
703 (u32) deq_state->new_cycle_state);
704 /* Stop the TD queueing code from ringing the doorbell until
705 * this command completes. The HC won't set the dequeue pointer
706 * if the ring is running, and ringing the doorbell starts the
707 * ring running.
708 */
709 ep->ep_state |= SET_DEQ_PENDING;
710 }
711
712 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
713 struct xhci_virt_ep *ep)
714 {
715 ep->ep_state &= ~EP_HALT_PENDING;
716 /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
717 * timer is running on another CPU, we don't decrement stop_cmds_pending
718 * (since we didn't successfully stop the watchdog timer).
719 */
720 if (del_timer(&ep->stop_cmd_timer))
721 ep->stop_cmds_pending--;
722 }
723
724 /* Must be called with xhci->lock held in interrupt context */
725 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
726 struct xhci_td *cur_td, int status, char *adjective)
727 {
728 struct usb_hcd *hcd;
729 struct urb *urb;
730 struct urb_priv *urb_priv;
731
732 urb = cur_td->urb;
733 urb_priv = urb->hcpriv;
734 urb_priv->td_cnt++;
735 hcd = bus_to_hcd(urb->dev->bus);
736
737 /* Only give back the urb when this is the last td in the urb */
738 if (urb_priv->td_cnt == urb_priv->length) {
739 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
740 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
741 #ifndef CONFIG_MTK_XHCI
742 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
743 if (xhci->quirks & XHCI_AMD_PLL_FIX)
744 usb_amd_quirk_pll_enable();
745 }
746 #endif
747 }
748 usb_hcd_unlink_urb_from_ep(hcd, urb);
749
750 spin_unlock(&xhci->lock);
751 usb_hcd_giveback_urb(hcd, urb, status);
752 xhci_urb_free_priv(xhci, urb_priv);
753 spin_lock(&xhci->lock);
754 }
755 }
756
757 /*
758 * When we get a command completion for a Stop Endpoint Command, we need to
759 * unlink any cancelled TDs from the ring. There are two ways to do that:
760 *
761 * 1. If the HW was in the middle of processing the TD that needs to be
762 * cancelled, then we must move the ring's dequeue pointer past the last TRB
763 * in the TD with a Set Dequeue Pointer Command.
764 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
765 * bit cleared) so that the HW will skip over them.
766 */
767 static void handle_stopped_endpoint(struct xhci_hcd *xhci,
768 union xhci_trb *trb, struct xhci_event_cmd *event)
769 {
770 unsigned int slot_id;
771 unsigned int ep_index;
772 struct xhci_virt_device *virt_dev;
773 struct xhci_ring *ep_ring;
774 struct xhci_virt_ep *ep;
775 struct list_head *entry;
776 struct xhci_td *cur_td = NULL;
777 struct xhci_td *last_unlinked_td;
778
779 struct xhci_dequeue_state deq_state;
780
781 if (unlikely(TRB_TO_SUSPEND_PORT(
782 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
783 slot_id = TRB_TO_SLOT_ID(
784 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
785 virt_dev = xhci->devs[slot_id];
786 if (virt_dev)
787 handle_cmd_in_cmd_wait_list(xhci, virt_dev,
788 event);
789 else
790 xhci_warn(xhci, "Stop endpoint command "
791 "completion for disabled slot %u\n",
792 slot_id);
793 return;
794 }
795
796 memset(&deq_state, 0, sizeof(deq_state));
797 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
798 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
799 ep = &xhci->devs[slot_id]->eps[ep_index];
800
801 if (list_empty(&ep->cancelled_td_list)) {
802 xhci_stop_watchdog_timer_in_irq(xhci, ep);
803 ep->stopped_td = NULL;
804 ep->stopped_trb = NULL;
805 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
806 return;
807 }
808
809 /* Fix up the ep ring first, so HW stops executing cancelled TDs.
810 * We have the xHCI lock, so nothing can modify this list until we drop
811 * it. We're also in the event handler, so we can't get re-interrupted
812 * if another Stop Endpoint command completes
813 */
814 list_for_each(entry, &ep->cancelled_td_list) {
815 cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
816 xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
817 (unsigned long long)xhci_trb_virt_to_dma(
818 cur_td->start_seg, cur_td->first_trb));
819 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
820 if (!ep_ring) {
821 /* This shouldn't happen unless a driver is mucking
822 * with the stream ID after submission. This will
823 * leave the TD on the hardware ring, and the hardware
824 * will try to execute it, and may access a buffer
825 * that has already been freed. In the best case, the
826 * hardware will execute it, and the event handler will
827 * ignore the completion event for that TD, since it was
828 * removed from the td_list for that endpoint. In
829 * short, don't muck with the stream ID after
830 * submission.
831 */
832 xhci_warn(xhci, "WARN Cancelled URB %p "
833 "has invalid stream ID %u.\n",
834 cur_td->urb,
835 cur_td->urb->stream_id);
836 goto remove_finished_td;
837 }
838 /*
839 * If we stopped on the TD we need to cancel, then we have to
840 * move the xHC endpoint ring dequeue pointer past this TD.
841 */
842 if (cur_td == ep->stopped_td)
843 xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
844 cur_td->urb->stream_id,
845 cur_td, &deq_state);
846 else
847 td_to_noop(xhci, ep_ring, cur_td, false);
848 remove_finished_td:
849 /*
850 * The event handler won't see a completion for this TD anymore,
851 * so remove it from the endpoint ring's TD list. Keep it in
852 * the cancelled TD list for URB completion later.
853 */
854 list_del_init(&cur_td->td_list);
855 }
856 last_unlinked_td = cur_td;
857 xhci_stop_watchdog_timer_in_irq(xhci, ep);
858
859 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
860 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
861 xhci_queue_new_dequeue_state(xhci,
862 slot_id, ep_index,
863 ep->stopped_td->urb->stream_id,
864 &deq_state);
865 xhci_ring_cmd_db(xhci);
866 } else {
867 /* Otherwise ring the doorbell(s) to restart queued transfers */
868 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
869 }
870
871 /* Clear stopped_td and stopped_trb if endpoint is not halted */
872 if (!(ep->ep_state & EP_HALTED)) {
873 ep->stopped_td = NULL;
874 ep->stopped_trb = NULL;
875 }
876
877 /*
878 * Drop the lock and complete the URBs in the cancelled TD list.
879 * New TDs to be cancelled might be added to the end of the list before
880 * we can complete all the URBs for the TDs we already unlinked.
881 * So stop when we've completed the URB for the last TD we unlinked.
882 */
883 do {
884 cur_td = list_entry(ep->cancelled_td_list.next,
885 struct xhci_td, cancelled_td_list);
886 list_del_init(&cur_td->cancelled_td_list);
887
888 /* Clean up the cancelled URB */
889 /* Doesn't matter what we pass for status, since the core will
890 * just overwrite it (because the URB has been unlinked).
891 */
892 xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
893
894 /* Stop processing the cancelled list if the watchdog timer is
895 * running.
896 */
897 if (xhci->xhc_state & XHCI_STATE_DYING)
898 return;
899 } while (cur_td != last_unlinked_td);
900
901 /* Return to the event handler with xhci->lock re-acquired */
902 }
903
904 /* Watchdog timer function for when a stop endpoint command fails to complete.
905 * In this case, we assume the host controller is broken or dying or dead. The
906 * host may still be completing some other events, so we have to be careful to
907 * let the event ring handler and the URB dequeueing/enqueueing functions know
908 * through xhci->xhc_state.
909 *
910 * The timer may also fire if the host takes a very long time to respond to the
911 * command, and the stop endpoint command completion handler cannot delete the
912 * timer before the timer function is called. Another endpoint cancellation may
913 * sneak in before the timer function can grab the lock, and that may queue
914 * another stop endpoint command and add the timer back. So we cannot use a
915 * simple flag to say whether there is a pending stop endpoint command for a
916 * particular endpoint.
917 *
918 * Instead we use a combination of that flag and a counter for the number of
919 * pending stop endpoint commands. If the timer is the tail end of the last
920 * stop endpoint command, and the endpoint's command is still pending, we assume
921 * the host is dying.
922 */
923 void xhci_stop_endpoint_command_watchdog(unsigned long arg)
924 {
925 struct xhci_hcd *xhci;
926 struct xhci_virt_ep *ep;
927 struct xhci_virt_ep *temp_ep;
928 struct xhci_ring *ring;
929 struct xhci_td *cur_td;
930 int ret, i, j;
931 unsigned long flags;
932
933 ep = (struct xhci_virt_ep *) arg;
934 xhci = ep->xhci;
935
936 spin_lock_irqsave(&xhci->lock, flags);
937
938 ep->stop_cmds_pending--;
939 if (xhci->xhc_state & XHCI_STATE_DYING) {
940 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
941 "xHCI as DYING, exiting.\n");
942 spin_unlock_irqrestore(&xhci->lock, flags);
943 return;
944 }
945 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
946 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
947 "exiting.\n");
948 spin_unlock_irqrestore(&xhci->lock, flags);
949 return;
950 }
951
952 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
953 xhci_warn(xhci, "Assuming host is dying, halting host.\n");
954 /* Oops, HC is dead or dying or at least not responding to the stop
955 * endpoint command.
956 */
957 xhci->xhc_state |= XHCI_STATE_DYING;
958 /* Disable interrupts from the host controller and start halting it */
959 xhci_quiesce(xhci);
960 spin_unlock_irqrestore(&xhci->lock, flags);
961
962 ret = xhci_halt(xhci);
963
964 spin_lock_irqsave(&xhci->lock, flags);
965 if (ret < 0) {
966 /* This is bad; the host is not responding to commands and it's
967 * not allowing itself to be halted. At least interrupts are
968 * disabled. If we call usb_hc_died(), it will attempt to
969 * disconnect all device drivers under this host. Those
970 * disconnect() methods will wait for all URBs to be unlinked,
971 * so we must complete them.
972 */
973 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
974 xhci_warn(xhci, "Completing active URBs anyway.\n");
975 /* We could turn all TDs on the rings to no-ops. This won't
976 * help if the host has cached part of the ring, and is slow if
977 * we want to preserve the cycle bit. Skip it and hope the host
978 * doesn't touch the memory.
979 */
980 }
981 for (i = 0; i < MAX_HC_SLOTS; i++) {
982 if (!xhci->devs[i])
983 continue;
984 for (j = 0; j < 31; j++) {
985 temp_ep = &xhci->devs[i]->eps[j];
986 ring = temp_ep->ring;
987 if (!ring)
988 continue;
989 xhci_dbg(xhci, "Killing URBs for slot ID %u, "
990 "ep index %u\n", i, j);
991 while (!list_empty(&ring->td_list)) {
992 cur_td = list_first_entry(&ring->td_list,
993 struct xhci_td,
994 td_list);
995 list_del_init(&cur_td->td_list);
996 if (!list_empty(&cur_td->cancelled_td_list))
997 list_del_init(&cur_td->cancelled_td_list);
998 xhci_giveback_urb_in_irq(xhci, cur_td,
999 -ESHUTDOWN, "killed");
1000 }
1001 while (!list_empty(&temp_ep->cancelled_td_list)) {
1002 cur_td = list_first_entry(
1003 &temp_ep->cancelled_td_list,
1004 struct xhci_td,
1005 cancelled_td_list);
1006 list_del_init(&cur_td->cancelled_td_list);
1007 xhci_giveback_urb_in_irq(xhci, cur_td,
1008 -ESHUTDOWN, "killed");
1009 }
1010 }
1011 }
1012 spin_unlock_irqrestore(&xhci->lock, flags);
1013 xhci_dbg(xhci, "Calling usb_hc_died()\n");
1014 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1015 xhci_dbg(xhci, "xHCI host controller is dead.\n");
1016 }
1017
1018
1019 static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1020 struct xhci_virt_device *dev,
1021 struct xhci_ring *ep_ring,
1022 unsigned int ep_index)
1023 {
1024 union xhci_trb *dequeue_temp;
1025 int num_trbs_free_temp;
1026 bool revert = false;
1027
1028 num_trbs_free_temp = ep_ring->num_trbs_free;
1029 dequeue_temp = ep_ring->dequeue;
1030
1031 /* If we get two back-to-back stalls, and the first stalled transfer
1032 * ends just before a link TRB, the dequeue pointer will be left on
1033 * the link TRB by the code in the while loop. So we have to update
1034 * the dequeue pointer one segment further, or we'll jump off
1035 * the segment into la-la-land.
1036 */
1037 if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
1038 ep_ring->deq_seg = ep_ring->deq_seg->next;
1039 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1040 }
1041
1042 while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1043 /* We have more usable TRBs */
1044 ep_ring->num_trbs_free++;
1045 ep_ring->dequeue++;
1046 if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
1047 ep_ring->dequeue)) {
1048 if (ep_ring->dequeue ==
1049 dev->eps[ep_index].queued_deq_ptr)
1050 break;
1051 ep_ring->deq_seg = ep_ring->deq_seg->next;
1052 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1053 }
1054 if (ep_ring->dequeue == dequeue_temp) {
1055 revert = true;
1056 break;
1057 }
1058 }
1059
1060 if (revert) {
1061 xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1062 ep_ring->num_trbs_free = num_trbs_free_temp;
1063 }
1064 }
1065
1066 /*
1067 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
1068 * we need to clear the set deq pending flag in the endpoint ring state, so that
1069 * the TD queueing code can ring the doorbell again. We also need to ring the
1070 * endpoint doorbell to restart the ring, but only if there aren't more
1071 * cancellations pending.
1072 */
1073 static void handle_set_deq_completion(struct xhci_hcd *xhci,
1074 struct xhci_event_cmd *event,
1075 union xhci_trb *trb)
1076 {
1077 unsigned int slot_id;
1078 unsigned int ep_index;
1079 unsigned int stream_id;
1080 struct xhci_ring *ep_ring;
1081 struct xhci_virt_device *dev;
1082 struct xhci_ep_ctx *ep_ctx;
1083 struct xhci_slot_ctx *slot_ctx;
1084
1085 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
1086 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1087 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1088 dev = xhci->devs[slot_id];
1089
1090 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
1091 if (!ep_ring) {
1092 xhci_warn(xhci, "WARN Set TR deq ptr command for "
1093 "freed stream ID %u\n",
1094 stream_id);
1095 /* XXX: Harmless??? */
1096 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1097 return;
1098 }
1099
1100 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
1101 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
1102
1103 if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
1104 unsigned int ep_state;
1105 unsigned int slot_state;
1106
1107 switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
1108 case COMP_TRB_ERR:
1109 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
1110 "of stream ID configuration\n");
1111 break;
1112 case COMP_CTX_STATE:
1113 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
1114 "to incorrect slot or ep state.\n");
1115 ep_state = le32_to_cpu(ep_ctx->ep_info);
1116 ep_state &= EP_STATE_MASK;
1117 slot_state = le32_to_cpu(slot_ctx->dev_state);
1118 slot_state = GET_SLOT_STATE(slot_state);
1119 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
1120 slot_state, ep_state);
1121 break;
1122 case COMP_EBADSLT:
1123 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
1124 "slot %u was not enabled.\n", slot_id);
1125 break;
1126 default:
1127 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
1128 "completion code of %u.\n",
1129 GET_COMP_CODE(le32_to_cpu(event->status)));
1130 break;
1131 }
1132 /* OK what do we do now? The endpoint state is hosed, and we
1133 * should never get to this point if the synchronization between
1134 * queueing and endpoint state is correct. This might happen
1135 * if the device gets disconnected after we've finished
1136 * cancelling URBs, which might not be an error...
1137 */
1138 } else {
1139 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
1140 le64_to_cpu(ep_ctx->deq));
1141 if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
1142 dev->eps[ep_index].queued_deq_ptr) ==
1143 (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
1144 /* Update the ring's dequeue segment and dequeue pointer
1145 * to reflect the new position.
1146 */
1147 update_ring_for_set_deq_completion(xhci, dev,
1148 ep_ring, ep_index);
1149 } else {
1150 xhci_warn(xhci, "Mismatch between completed Set TR Deq "
1151 "Ptr command & xHCI internal state.\n");
1152 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1153 dev->eps[ep_index].queued_deq_seg,
1154 dev->eps[ep_index].queued_deq_ptr);
1155 }
1156 }
1157
1158 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1159 dev->eps[ep_index].queued_deq_seg = NULL;
1160 dev->eps[ep_index].queued_deq_ptr = NULL;
1161 /* Restart any rings with pending URBs */
1162 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1163 }
1164
1165 static void handle_reset_ep_completion(struct xhci_hcd *xhci,
1166 struct xhci_event_cmd *event,
1167 union xhci_trb *trb)
1168 {
1169 int slot_id;
1170 unsigned int ep_index;
1171
1172 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
1173 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1174 /* This command will only fail if the endpoint wasn't halted,
1175 * but we don't care.
1176 */
1177 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
1178 GET_COMP_CODE(le32_to_cpu(event->status)));
1179
1180 /* HW with the reset endpoint quirk needs to have a configure endpoint
1181 * command complete before the endpoint can be used. Queue that here
1182 * because the HW can't handle two commands being queued in a row.
1183 */
1184 if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1185 xhci_dbg(xhci, "Queueing configure endpoint command\n");
1186 xhci_queue_configure_endpoint(xhci,
1187 xhci->devs[slot_id]->in_ctx->dma, slot_id,
1188 false);
1189 xhci_ring_cmd_db(xhci);
1190 } else {
1191 /* Clear our internal halted state */
1192 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1193 }
1194 }
1195
1196 /* Complete the command and delete it from the device's command queue.
1197 */
1198 static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1199 struct xhci_command *command, u32 status)
1200 {
1201 command->status = status;
1202 list_del(&command->cmd_list);
1203 if (command->completion)
1204 complete(command->completion);
1205 else
1206 xhci_free_command(xhci, command);
1207 }
1208
1209
1210 /* Check to see if a command in the device's command queue matches this one.
1211 * Signal the completion or free the command, and return 1. Return 0 if the
1212 * completed command isn't at the head of the command list.
1213 */
1214 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1215 struct xhci_virt_device *virt_dev,
1216 struct xhci_event_cmd *event)
1217 {
1218 struct xhci_command *command;
1219
1220 if (list_empty(&virt_dev->cmd_list))
1221 return 0;
1222
1223 command = list_entry(virt_dev->cmd_list.next,
1224 struct xhci_command, cmd_list);
1225 if (xhci->cmd_ring->dequeue != command->command_trb)
1226 return 0;
1227
1228 xhci_complete_cmd_in_cmd_wait_list(xhci, command,
1229 GET_COMP_CODE(le32_to_cpu(event->status)));
1230 return 1;
1231 }
1232
1233 /*
1234 * Find the command trb that needs to be cancelled and modify it to a
1235 * No-op command. If the command is in the device's command wait
1236 * list, finish it and free it.
1237 *
1238 * If we can't find the command trb, we assume it has already been
1239 * executed.
1240 */
1241 static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
1242 {
1243 struct xhci_segment *cur_seg;
1244 union xhci_trb *cmd_trb;
1245 u32 cycle_state;
1246
1247 if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1248 return;
1249
1250 /* find the current segment of command ring */
1251 cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
1252 xhci->cmd_ring->dequeue, &cycle_state);
1253
1254 if (!cur_seg) {
1255 xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
1256 xhci->cmd_ring->dequeue,
1257 (unsigned long long)
1258 xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1259 xhci->cmd_ring->dequeue));
1260 xhci_debug_ring(xhci, xhci->cmd_ring);
1261 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
1262 return;
1263 }
1264
1265 /* find the command trb matching the command descriptor on the command ring */
1266 for (cmd_trb = xhci->cmd_ring->dequeue;
1267 cmd_trb != xhci->cmd_ring->enqueue;
1268 next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
1269 /* If the trb is a link trb, continue */
1270 if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
1271 continue;
1272
1273 if (cur_cd->cmd_trb == cmd_trb) {
1274
1275 /* If the command is in the device's command list, we should
1276 * finish it and free the command structure.
1277 */
1278 if (cur_cd->command)
1279 xhci_complete_cmd_in_cmd_wait_list(xhci,
1280 cur_cd->command, COMP_CMD_STOP);
1281
1282 /* get cycle state from the original command trb */
1283 cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
1284 & TRB_CYCLE;
1285
1286 /* modify the command trb to NO OP command */
1287 cmd_trb->generic.field[0] = 0;
1288 cmd_trb->generic.field[1] = 0;
1289 cmd_trb->generic.field[2] = 0;
1290 cmd_trb->generic.field[3] = cpu_to_le32(
1291 TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1292 break;
1293 }
1294 }
1295 }
1296
1297 static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
1298 {
1299 struct xhci_cd *cur_cd, *next_cd;
1300
1301 if (list_empty(&xhci->cancel_cmd_list))
1302 return;
1303
1304 list_for_each_entry_safe(cur_cd, next_cd,
1305 &xhci->cancel_cmd_list, cancel_cmd_list) {
1306 xhci_cmd_to_noop(xhci, cur_cd);
1307 list_del(&cur_cd->cancel_cmd_list);
1308 kfree(cur_cd);
1309 }
1310 }
1311
1312 /*
1313 * Traverse the cancel_cmd_list. If the command descriptor matching
1314 * cmd_trb is found, free it and return 1; otherwise
1315 * return 0.
1316 */
1317 static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
1318 union xhci_trb *cmd_trb)
1319 {
1320 struct xhci_cd *cur_cd, *next_cd;
1321
1322 if (list_empty(&xhci->cancel_cmd_list))
1323 return 0;
1324
1325 list_for_each_entry_safe(cur_cd, next_cd,
1326 &xhci->cancel_cmd_list, cancel_cmd_list) {
1327 if (cur_cd->cmd_trb == cmd_trb) {
1328 if (cur_cd->command)
1329 xhci_complete_cmd_in_cmd_wait_list(xhci,
1330 cur_cd->command, COMP_CMD_STOP);
1331 list_del(&cur_cd->cancel_cmd_list);
1332 kfree(cur_cd);
1333 return 1;
1334 }
1335 }
1336
1337 return 0;
1338 }
1339
1340 /*
1341 * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
1342 * trb pointed to by the command ring dequeue pointer is the trb we want
1343 * to cancel. If the cmd_trb_comp_code is COMP_CMD_STOP, we
1344 * traverse the cancel_cmd_list and turn all of the commands that have a
1345 * command descriptor into No-op trbs.
1346 */
1347 static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1348 int cmd_trb_comp_code)
1349 {
1350 int cur_trb_is_good = 0;
1351
1352 /* Search for the command trb pointed to by the command ring dequeue
1353 * pointer in the command descriptor list. If it is found, free it.
1354 */
1355 cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
1356 xhci->cmd_ring->dequeue);
1357
1358 if (cmd_trb_comp_code == COMP_CMD_ABORT)
1359 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1360 else if (cmd_trb_comp_code == COMP_CMD_STOP) {
1361 /* traverse the cancel_cmd_list and cancel
1362 * each command that has a command descriptor
1363 */
1364 xhci_cancel_cmd_in_cd_list(xhci);
1365
1366 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1367 /*
1368 * ring command ring doorbell again to restart the
1369 * command ring
1370 */
1371 if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
1372 xhci_ring_cmd_db(xhci);
1373 }
1374 return cur_trb_is_good;
1375 }
1376
1377 static void handle_cmd_completion(struct xhci_hcd *xhci,
1378 struct xhci_event_cmd *event)
1379 {
1380 int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1381 u64 cmd_dma;
1382 dma_addr_t cmd_dequeue_dma;
1383 struct xhci_input_control_ctx *ctrl_ctx;
1384 struct xhci_virt_device *virt_dev;
1385 unsigned int ep_index;
1386 struct xhci_ring *ep_ring;
1387 unsigned int ep_state;
1388
1389 cmd_dma = le64_to_cpu(event->cmd_trb);
1390 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1391 xhci->cmd_ring->dequeue);
1392 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
1393 if (cmd_dequeue_dma == 0) {
1394 xhci->error_bitmask |= 1 << 4;
1395 return;
1396 }
1397 /* Does the DMA address match our internal dequeue pointer address? */
1398 if (cmd_dma != (u64) cmd_dequeue_dma) {
1399 xhci->error_bitmask |= 1 << 5;
1400 return;
1401 }
1402
1403 if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
1404 (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
1405 /* If the return value is 0, the trb pointed to by the
1406 * command ring dequeue pointer is a good trb. A good
1407 * trb is one we don't want to cancel; it has merely
1408 * been stopped by the host, so we should handle it normally.
1409 * Otherwise, the driver should invoke inc_deq() and return.
1410 */
1411 if (handle_stopped_cmd_ring(xhci,
1412 GET_COMP_CODE(le32_to_cpu(event->status)))) {
1413 inc_deq(xhci, xhci->cmd_ring);
1414 return;
1415 }
1416 /* There is no command to handle if we get a stop event when the
1417 * command ring is empty; event->cmd_trb points to the next
1418 * unset command.
1419 */
1420 if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1421 return;
1422 }
1423
1424 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
1425 & TRB_TYPE_BITMASK) {
1426 case TRB_TYPE(TRB_ENABLE_SLOT):
1427 if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
1428 xhci->slot_id = slot_id;
1429 else
1430 xhci->slot_id = 0;
1431 complete(&xhci->addr_dev);
1432 break;
1433 case TRB_TYPE(TRB_DISABLE_SLOT):
1434 if (xhci->devs[slot_id]) {
1435 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1436 /* Delete default control endpoint resources */
1437 xhci_free_device_endpoint_resources(xhci,
1438 xhci->devs[slot_id], true);
1439 xhci_free_virt_device(xhci, slot_id);
1440 }
1441 break;
1442 case TRB_TYPE(TRB_CONFIG_EP):
1443 virt_dev = xhci->devs[slot_id];
1444 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1445 break;
1446 /*
1447 * Configure endpoint commands can come from the USB core
1448 * configuration or alt setting changes, or because the HW
1449 * needed an extra configure endpoint command after a reset
1450 * endpoint command or streams were being configured.
1451 * If the command was for a halted endpoint, the xHCI driver
1452 * is not waiting on the configure endpoint command.
1453 */
1454 ctrl_ctx = xhci_get_input_control_ctx(xhci,
1455 virt_dev->in_ctx);
1456 /* Input ctx add_flags are the endpoint index plus one */
1457 ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
1458 /* A usb_set_interface() call directly after clearing a halted
1459 * condition may race on this quirky hardware. Not worth
1460 * worrying about, since this is prototype hardware. Not sure
1461 * if this will work for streams, but streams support was
1462 * untested on this prototype.
1463 */
1464 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1465 ep_index != (unsigned int) -1 &&
1466 le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
1467 le32_to_cpu(ctrl_ctx->drop_flags)) {
1468 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1469 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1470 if (!(ep_state & EP_HALTED))
1471 goto bandwidth_change;
1472 xhci_dbg(xhci, "Completed config ep cmd - "
1473 "last ep index = %d, state = %d\n",
1474 ep_index, ep_state);
1475 /* Clear internal halted state and restart ring(s) */
1476 xhci->devs[slot_id]->eps[ep_index].ep_state &=
1477 ~EP_HALTED;
1478 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1479 break;
1480 }
1481 bandwidth_change:
1482 xhci_dbg(xhci, "Completed config ep cmd\n");
1483 xhci->devs[slot_id]->cmd_status =
1484 GET_COMP_CODE(le32_to_cpu(event->status));
1485 complete(&xhci->devs[slot_id]->cmd_completion);
1486 break;
1487 case TRB_TYPE(TRB_EVAL_CONTEXT):
1488 virt_dev = xhci->devs[slot_id];
1489 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1490 break;
1491 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1492 complete(&xhci->devs[slot_id]->cmd_completion);
1493 break;
1494 case TRB_TYPE(TRB_ADDR_DEV):
1495 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1496 complete(&xhci->addr_dev);
1497 break;
1498 case TRB_TYPE(TRB_STOP_RING):
1499 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
1500 break;
1501 case TRB_TYPE(TRB_SET_DEQ):
1502 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
1503 break;
1504 case TRB_TYPE(TRB_CMD_NOOP):
1505 break;
1506 case TRB_TYPE(TRB_RESET_EP):
1507 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
1508 break;
1509 case TRB_TYPE(TRB_RESET_DEV):
1510 xhci_dbg(xhci, "Completed reset device command.\n");
1511 slot_id = TRB_TO_SLOT_ID(
1512 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
1513 virt_dev = xhci->devs[slot_id];
1514 if (virt_dev)
1515 handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
1516 else
1517 xhci_warn(xhci, "Reset device command completion "
1518 "for disabled slot %u\n", slot_id);
1519 break;
1520 case TRB_TYPE(TRB_NEC_GET_FW):
1521 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1522 xhci->error_bitmask |= 1 << 6;
1523 break;
1524 }
1525 xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
1526 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1527 NEC_FW_MINOR(le32_to_cpu(event->status)));
1528 break;
1529 default:
1530 /* Skip over unknown commands on the event ring */
1531 xhci->error_bitmask |= 1 << 6;
1532 break;
1533 }
1534 inc_deq(xhci, xhci->cmd_ring);
1535 }
1536
1537 static void handle_vendor_event(struct xhci_hcd *xhci,
1538 union xhci_trb *event)
1539 {
1540 u32 trb_type;
1541
1542 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1543 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1544 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1545 handle_cmd_completion(xhci, &event->event_cmd);
1546 }
1547
1548 /* @port_id: the one-based port ID from the hardware (indexed from array of all
1549 * port registers -- USB 3.0 and USB 2.0).
1550 *
1551 * Returns a zero-based port number, which is suitable for indexing into each of
1552 * the split roothubs' port arrays and bus state arrays.
1553 * Add one to it in order to call xhci_find_slot_id_by_port.
1554 */
1555 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1556 struct xhci_hcd *xhci, u32 port_id)
1557 {
1558 unsigned int i;
1559 unsigned int num_similar_speed_ports = 0;
1560
1561 /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1562 * and usb2_ports are 0-based indexes. Count the number of similar
1563 * speed ports, up to 1 port before this port.
1564 */
1565 for (i = 0; i < (port_id - 1); i++) {
1566 u8 port_speed = xhci->port_array[i];
1567
1568 /*
1569 * Skip ports that don't have known speeds, or have duplicate
1570 * Extended Capabilities port speed entries.
1571 */
1572 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1573 continue;
1574
1575 /*
1576 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
1577 * 1.1 ports are under the USB 2.0 hub. If the port speed
1578 * matches the device speed, it's a similar speed port.
1579 */
1580 if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
1581 num_similar_speed_ports++;
1582 }
1583 return num_similar_speed_ports;
1584 }
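/*
 * Illustrative sketch, not part of the driver: a hypothetical helper
 * (example_count_similar_speed_ports is not called anywhere in this
 * file) giving a worked example of the translation above. For a
 * three-port host whose port_array holds {0x02, 0x03, 0x02} (USB 2.0,
 * 3.0, 2.0), a port status event for hardware port 3 on the USB 2.0
 * roothub sees one earlier similar-speed port (hardware port 1), so
 * the faked port index is 1.
 */
static inline unsigned int example_count_similar_speed_ports(
		const u8 *port_array, u32 port_id, bool roothub_is_usb3)
{
	unsigned int i, count = 0;

	for (i = 0; i < port_id - 1; i++) {
		/* Skip unknown speeds and duplicate entries, as above */
		if (port_array[i] == 0 || port_array[i] == DUPLICATE_ENTRY)
			continue;
		if ((port_array[i] == 0x03) == roothub_is_usb3)
			count++;
	}
	return count;
}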
1585
1586 static void handle_device_notification(struct xhci_hcd *xhci,
1587 union xhci_trb *event)
1588 {
1589 u32 slot_id;
1590 struct usb_device *udev;
1591
1592 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1593 if (!xhci->devs[slot_id]) {
1594 xhci_warn(xhci, "Device Notification event for "
1595 "unused slot %u\n", slot_id);
1596 return;
1597 }
1598
1599 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1600 slot_id);
1601 udev = xhci->devs[slot_id]->udev;
1602 if (udev && udev->parent)
1603 usb_wakeup_notification(udev->parent, udev->portnum);
1604 }
1605
1606 static void handle_port_status(struct xhci_hcd *xhci,
1607 union xhci_trb *event)
1608 {
1609 struct usb_hcd *hcd;
1610 u32 port_id;
1611 u32 temp, temp1;
1612 int max_ports;
1613 int slot_id;
1614 unsigned int faked_port_index;
1615 u8 major_revision;
1616 struct xhci_bus_state *bus_state;
1617 __le32 __iomem **port_array;
1618 bool bogus_port_status = false;
1619
1620 /* Port status change events always have a successful completion code */
1621 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1622 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1623 xhci->error_bitmask |= 1 << 8;
1624 }
1625 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1626 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1627
1628 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1629 if ((port_id <= 0) || (port_id > max_ports)) {
1630 xhci_warn(xhci, "Invalid port id %d\n", port_id);
1631 inc_deq(xhci, xhci->event_ring);
1632 return;
1633 }
1634
1635 /* Figure out which usb_hcd this port is attached to:
1636 * is it a USB 3.0 port or a USB 2.0/1.1 port?
1637 */
1638 major_revision = xhci->port_array[port_id - 1];
1639
1640 /* Find the right roothub. */
1641 hcd = xhci_to_hcd(xhci);
1642 if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1643 hcd = xhci->shared_hcd;
1644
1645 if (major_revision == 0) {
1646 xhci_warn(xhci, "Event for port %u not in "
1647 "Extended Capabilities, ignoring.\n",
1648 port_id);
1649 bogus_port_status = true;
1650 goto cleanup;
1651 }
1652 if (major_revision == DUPLICATE_ENTRY) {
1653 xhci_warn(xhci, "Event for port %u duplicated in"
1654 "Extended Capabilities, ignoring.\n",
1655 port_id);
1656 bogus_port_status = true;
1657 goto cleanup;
1658 }
1659
1660 /*
1661 * Hardware port IDs reported by a Port Status Change Event include USB
1662 * 3.0 and USB 2.0 ports. We want to check if the port has reported a
1663 * resume event, but we first need to translate the hardware port ID
1664 * into the index into the ports on the correct split roothub, and the
1665 * correct bus_state structure.
1666 */
1667 bus_state = &xhci->bus_state[hcd_index(hcd)];
1668 if (hcd->speed == HCD_USB3)
1669 port_array = xhci->usb3_ports;
1670 else
1671 port_array = xhci->usb2_ports;
1672 /* Find the faked port hub number */
1673 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1674 port_id);
1675
1676 temp = xhci_readl(xhci, port_array[faked_port_index]);
1677 if (hcd->state == HC_STATE_SUSPENDED) {
1678 xhci_dbg(xhci, "resume root hub\n");
1679 usb_hcd_resume_root_hub(hcd);
1680 }
1681
1682 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1683 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1684
1685 temp1 = xhci_readl(xhci, &xhci->op_regs->command);
1686 if (!(temp1 & CMD_RUN)) {
1687 xhci_warn(xhci, "xHC is not running.\n");
1688 goto cleanup;
1689 }
1690
1691 if (DEV_SUPERSPEED(temp)) {
1692 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1693 /* Set a flag to say the port signaled remote wakeup,
1694 * so we can tell the difference between the end of
1695 * device and host initiated resume.
1696 */
1697 bus_state->port_remote_wakeup |= 1 << faked_port_index;
1698 xhci_test_and_clear_bit(xhci, port_array,
1699 faked_port_index, PORT_PLC);
1700 xhci_set_link_state(xhci, port_array, faked_port_index,
1701 XDEV_U0);
1702 /* Need to wait until the next link state change
1703 * indicates the device is actually in U0.
1704 */
1705 bogus_port_status = true;
1706 goto cleanup;
1707 } else {
1708 xhci_dbg(xhci, "resume HS port %d\n", port_id);
1709 bus_state->resume_done[faked_port_index] = jiffies +
1710 msecs_to_jiffies(20);
1711 set_bit(faked_port_index, &bus_state->resuming_ports);
1712 mod_timer(&hcd->rh_timer,
1713 bus_state->resume_done[faked_port_index]);
1714 /* Do the rest in GetPortStatus */
1715 }
1716 }
1717
1718 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
1719 DEV_SUPERSPEED(temp)) {
1720 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1721 /* We've just brought the device into U0 through either the
1722 * Resume state after a device remote wakeup, or through the
1723 * U3Exit state after a host-initiated resume. If it's a device
1724 * initiated remote wake, don't pass up the link state change,
1725 * so the roothub behavior is consistent with external
1726 * USB 3.0 hub behavior.
1727 */
1728 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1729 faked_port_index + 1);
1730 if (slot_id && xhci->devs[slot_id])
1731 xhci_ring_device(xhci, slot_id);
1732 if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1733 bus_state->port_remote_wakeup &=
1734 ~(1 << faked_port_index);
1735 xhci_test_and_clear_bit(xhci, port_array,
1736 faked_port_index, PORT_PLC);
1737 usb_wakeup_notification(hcd->self.root_hub,
1738 faked_port_index + 1);
1739 bogus_port_status = true;
1740 goto cleanup;
1741 }
1742 }
1743
1744 if (hcd->speed != HCD_USB3)
1745 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1746 PORT_PLC);
1747
1748 cleanup:
1749 /* Update event ring dequeue pointer before dropping the lock */
1750 inc_deq(xhci, xhci->event_ring);
1751
1752 /* Don't make the USB core poll the roothub if we got a bad port status
1753 * change event. Besides, at that point we can't tell which roothub
1754 * (USB 2.0 or USB 3.0) to kick.
1755 */
1756 if (bogus_port_status)
1757 return;
1758
1759 /*
1760 * xHCI port-status-change events occur when the "or" of all the
1761 * status-change bits in the portsc register changes from 0 to 1.
1762 * New status changes won't cause an event if any other change
1763 * bits are still set. When an event occurs, switch over to
1764 * polling to avoid losing status changes.
1765 */
1766 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1767 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1768 spin_unlock(&xhci->lock);
1769 /* Pass this up to the core */
1770 usb_hcd_poll_rh_status(hcd);
1771 spin_lock(&xhci->lock);
1772 }
1773
1774 /*
1775 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1776 * at end_trb, which may be in another segment. If the suspect DMA address is a
1777 * TRB in this TD, this function returns that TRB's segment. Otherwise it
1778 * returns NULL.
1779 */
1780 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1781 union xhci_trb *start_trb,
1782 union xhci_trb *end_trb,
1783 dma_addr_t suspect_dma)
1784 {
1785 dma_addr_t start_dma;
1786 dma_addr_t end_seg_dma;
1787 dma_addr_t end_trb_dma;
1788 struct xhci_segment *cur_seg;
1789
1790 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1791 cur_seg = start_seg;
1792
1793 do {
1794 if (start_dma == 0)
1795 return NULL;
1796 /* We may get an event for a Link TRB in the middle of a TD */
1797 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1798 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1799 /* If the end TRB isn't in this segment, this is set to 0 */
1800 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1801
1802 if (end_trb_dma > 0) {
1803 /* The end TRB is in this segment, so suspect should be here */
1804 if (start_dma <= end_trb_dma) {
1805 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1806 return cur_seg;
1807 } else {
1808 /* Case for one segment with
1809 * a TD wrapped around to the top
1810 */
1811 if ((suspect_dma >= start_dma &&
1812 suspect_dma <= end_seg_dma) ||
1813 (suspect_dma >= cur_seg->dma &&
1814 suspect_dma <= end_trb_dma))
1815 return cur_seg;
1816 }
1817 return NULL;
1818 } else {
1819 /* Might still be somewhere in this segment */
1820 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1821 return cur_seg;
1822 }
1823 cur_seg = cur_seg->next;
1824 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1825 } while (cur_seg != start_seg);
1826
1827 return NULL;
1828 }
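
/*
 * Hedged example of the wrap-around case above (addresses are
 * illustrative only): in a single-segment ring, a TD may start near the
 * end of the segment and wrap past the link TRB back to the top. When
 * start_dma > end_trb_dma, a suspect DMA address belongs to the TD if it
 * lies either in [start_dma, end_seg_dma] or in [cur_seg->dma,
 * end_trb_dma].
 */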
1829
1830 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1831 unsigned int slot_id, unsigned int ep_index,
1832 unsigned int stream_id,
1833 struct xhci_td *td, union xhci_trb *event_trb)
1834 {
1835 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1836 ep->ep_state |= EP_HALTED;
1837 ep->stopped_td = td;
1838 ep->stopped_trb = event_trb;
1839 ep->stopped_stream = stream_id;
1840
1841 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1842 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1843
1844 ep->stopped_td = NULL;
1845 ep->stopped_trb = NULL;
1846 ep->stopped_stream = 0;
1847
1848 xhci_ring_cmd_db(xhci);
1849 }
1850
1851 /* Check if an error has halted the endpoint ring. The class driver will
1852 * clean up the halt for a non-default control endpoint if we indicate a stall.
1853 * However, a babble and other errors also halt the endpoint ring, and the class
1854 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1855 * Ring Dequeue Pointer command manually.
1856 */
1857 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1858 struct xhci_ep_ctx *ep_ctx,
1859 unsigned int trb_comp_code)
1860 {
1861 /* TRB completion codes that may require a manual halt cleanup */
1862 if (trb_comp_code == COMP_TX_ERR ||
1863 trb_comp_code == COMP_BABBLE ||
1864 trb_comp_code == COMP_SPLIT_ERR)
1865 /* The 0.95 spec says a babbling control endpoint
1866 * is not halted. The 0.96 spec says it is. Some HW
1867 * claims to be 0.95 compliant, but it halts the control
1868 * endpoint anyway. Check if a babble halted the
1869 * endpoint.
1870 */
1871 if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1872 cpu_to_le32(EP_STATE_HALTED))
1873 return 1;
1874
1875 return 0;
1876 }
1877
1878 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1879 {
1880 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1881 /* Vendor defined "informational" completion code,
1882 * treat as not-an-error.
1883 */
1884 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1885 trb_comp_code);
1886 xhci_dbg(xhci, "Treating code as success.\n");
1887 return 1;
1888 }
1889 return 0;
1890 }
1891
1892 /*
1893 * Finish the td processing, remove the td from td list;
1894 * Return 1 if the urb can be given back.
1895 */
1896 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1897 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1898 struct xhci_virt_ep *ep, int *status, bool skip)
1899 {
1900 struct xhci_virt_device *xdev;
1901 struct xhci_ring *ep_ring;
1902 unsigned int slot_id;
1903 int ep_index;
1904 struct urb *urb = NULL;
1905 struct xhci_ep_ctx *ep_ctx;
1906 int ret = 0;
1907 struct urb_priv *urb_priv;
1908 u32 trb_comp_code;
1909
1910 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1911 xdev = xhci->devs[slot_id];
1912 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1913 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1914 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1915 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1916
1917 if (skip)
1918 goto td_cleanup;
1919
1920 if (trb_comp_code == COMP_STOP_INVAL ||
1921 trb_comp_code == COMP_STOP) {
1922 /* The Endpoint Stop Command completion will take care of any
1923 * stopped TDs. A stopped TD may be restarted, so don't update
1924 * the ring dequeue pointer or take this TD off any lists yet.
1925 */
1926 ep->stopped_td = td;
1927 ep->stopped_trb = event_trb;
1928 return 0;
1929 } else {
1930 if (trb_comp_code == COMP_STALL) {
1931 /* The transfer is completed from the driver's
1932 * perspective, but we need to issue a set dequeue
1933 * command for this stalled endpoint to move the dequeue
1934 * pointer past the TD. We can't do that here because
1935 * the halt condition must be cleared first. Let the
1936 * USB class driver clear the stall later.
1937 */
1938 ep->stopped_td = td;
1939 ep->stopped_trb = event_trb;
1940 ep->stopped_stream = ep_ring->stream_id;
1941 } else if (xhci_requires_manual_halt_cleanup(xhci,
1942 ep_ctx, trb_comp_code)) {
1943 /* Other types of errors halt the endpoint, but the
1944 * class driver doesn't call usb_reset_endpoint() unless
1945 * the error is -EPIPE. Clear the halted status in the
1946 * xHCI hardware manually.
1947 */
1948 xhci_cleanup_halted_endpoint(xhci,
1949 slot_id, ep_index, ep_ring->stream_id,
1950 td, event_trb);
1951 } else {
1952 /* Update ring dequeue pointer */
1953 while (ep_ring->dequeue != td->last_trb)
1954 inc_deq(xhci, ep_ring);
1955 inc_deq(xhci, ep_ring);
1956 }
1957
1958 td_cleanup:
1959 /* Clean up the endpoint's TD list */
1960 urb = td->urb;
1961 urb_priv = urb->hcpriv;
1962
1963 /* Do one last check of the actual transfer length.
1964 * If the host controller said we transferred more data than
1965 * the buffer length, urb->actual_length will be a very big
1966 * number (since it's unsigned). Play it safe and say we didn't
1967 * transfer anything.
1968 */
1969 if (urb->actual_length > urb->transfer_buffer_length) {
1970 xhci_warn(xhci, "URB transfer length is wrong, "
1971 "xHC issue? req. len = %u, "
1972 "act. len = %u\n",
1973 urb->transfer_buffer_length,
1974 urb->actual_length);
1975 urb->actual_length = 0;
1976 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1977 *status = -EREMOTEIO;
1978 else
1979 *status = 0;
1980 }
1981 list_del_init(&td->td_list);
1982 /* Was this TD slated to be cancelled but completed anyway? */
1983 if (!list_empty(&td->cancelled_td_list))
1984 list_del_init(&td->cancelled_td_list);
1985
1986 urb_priv->td_cnt++;
1987 /* Giveback the urb when all the tds are completed */
1988 if (urb_priv->td_cnt == urb_priv->length) {
1989 ret = 1;
1990 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1991 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1992 #ifndef CONFIG_MTK_XHCI
1993 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
1994 == 0) {
1995 if (xhci->quirks & XHCI_AMD_PLL_FIX)
1996 usb_amd_quirk_pll_enable();
1997 }
1998 #endif
1999 }
2000 }
2001 }
2002
2003 return ret;
2004 }
2005
2006 /*
2007 * Process control tds, update urb status and actual_length.
2008 */
2009 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
2010 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2011 struct xhci_virt_ep *ep, int *status)
2012 {
2013 struct xhci_virt_device *xdev;
2014 struct xhci_ring *ep_ring;
2015 unsigned int slot_id;
2016 int ep_index;
2017 struct xhci_ep_ctx *ep_ctx;
2018 u32 trb_comp_code;
2019
2020 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2021 xdev = xhci->devs[slot_id];
2022 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2023 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2024 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2025 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2026
2027 switch (trb_comp_code) {
2028 case COMP_SUCCESS:
2029 if (event_trb == ep_ring->dequeue) {
2030 xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
2031 "without IOC set??\n");
2032 *status = -ESHUTDOWN;
2033 } else if (event_trb != td->last_trb) {
2034 xhci_warn(xhci, "WARN: Success on ctrl data TRB "
2035 "without IOC set??\n");
2036 *status = -ESHUTDOWN;
2037 } else {
2038 *status = 0;
2039 }
2040 break;
2041 case COMP_SHORT_TX:
2042 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2043 *status = -EREMOTEIO;
2044 else
2045 *status = 0;
2046 break;
2047 case COMP_STOP_INVAL:
2048 case COMP_STOP:
2049 return finish_td(xhci, td, event_trb, event, ep, status, false);
2050 default:
2051 if (!xhci_requires_manual_halt_cleanup(xhci,
2052 ep_ctx, trb_comp_code))
2053 break;
2054 xhci_dbg(xhci, "TRB error code %u, "
2055 "halted endpoint index = %u\n",
2056 trb_comp_code, ep_index);
2057 /* else fall through */
2058 case COMP_STALL:
2059 /* Did we transfer part of the data (middle) phase? */
2060 if (event_trb != ep_ring->dequeue &&
2061 event_trb != td->last_trb)
2062 td->urb->actual_length =
2063 td->urb->transfer_buffer_length -
2064 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2065 else
2066 td->urb->actual_length = 0;
2067
2068 xhci_cleanup_halted_endpoint(xhci,
2069 slot_id, ep_index, 0, td, event_trb);
2070 return finish_td(xhci, td, event_trb, event, ep, status, true);
2071 }
2072 /*
2073 * Did we transfer any data, despite the errors that might have
2074 * happened? I.e. did we get past the setup stage?
2075 */
2076 if (event_trb != ep_ring->dequeue) {
2077 /* The event was for the status stage */
2078 if (event_trb == td->last_trb) {
2079 if (td->urb_length_set) {
2080 /* Don't overwrite a previously set error code
2081 */
2082 if ((*status == -EINPROGRESS || *status == 0) &&
2083 (td->urb->transfer_flags
2084 & URB_SHORT_NOT_OK))
2085 /* Did we already see a short data
2086 * stage? */
2087 *status = -EREMOTEIO;
2088 } else {
2089 td->urb->actual_length =
2090 td->urb->transfer_buffer_length;
2091 }
2092 } else {
2093 /*
2094 * Maybe the event was for the data stage? If so, update
2095 * already the actual_length of the URB and flag it as
2096 * set, so that it is not overwritten in the event for
2097 * the last TRB.
2098 */
2099 td->urb_length_set = true;
2100 td->urb->actual_length =
2101 td->urb->transfer_buffer_length -
2102 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2103 xhci_dbg(xhci, "Waiting for status "
2104 "stage event\n");
2105 return 0;
2106 }
2107 }
2108
2109 return finish_td(xhci, td, event_trb, event, ep, status, false);
2110 }
2111
2112 /*
2113 * Process isochronous tds, update urb packet status and actual_length.
2114 */
2115 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2116 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2117 struct xhci_virt_ep *ep, int *status)
2118 {
2119 struct xhci_ring *ep_ring;
2120 struct urb_priv *urb_priv;
2121 int idx;
2122 int len = 0;
2123 union xhci_trb *cur_trb;
2124 struct xhci_segment *cur_seg;
2125 struct usb_iso_packet_descriptor *frame;
2126 u32 trb_comp_code;
2127 bool skip_td = false;
2128
2129 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2130 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2131 urb_priv = td->urb->hcpriv;
2132 idx = urb_priv->td_cnt;
2133 frame = &td->urb->iso_frame_desc[idx];
2134
2135 /* handle completion code */
2136 switch (trb_comp_code) {
2137 case COMP_SUCCESS:
2138 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2139 frame->status = 0;
2140 break;
2141 }
2142 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2143 trb_comp_code = COMP_SHORT_TX;
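/* fall through */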
2144 case COMP_SHORT_TX:
2145 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2146 -EREMOTEIO : 0;
2147 break;
2148 case COMP_BW_OVER:
2149 frame->status = -ECOMM;
2150 skip_td = true;
2151 break;
2152 case COMP_BUFF_OVER:
2153 case COMP_BABBLE:
2154 frame->status = -EOVERFLOW;
2155 skip_td = true;
2156 break;
2157 case COMP_DEV_ERR:
2158 case COMP_STALL:
2159 case COMP_TX_ERR:
2160 frame->status = -EPROTO;
2161 skip_td = true;
2162 break;
2163 case COMP_STOP:
2164 case COMP_STOP_INVAL:
2165 break;
2166 default:
2167 frame->status = -1;
2168 break;
2169 }
2170
2171 if (trb_comp_code == COMP_SUCCESS || skip_td) {
2172 frame->actual_length = frame->length;
2173 td->urb->actual_length += frame->length;
2174 } else {
2175 for (cur_trb = ep_ring->dequeue,
2176 cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2177 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2178 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2179 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2180 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2181 }
2182 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2183 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2184
2185 if (trb_comp_code != COMP_STOP_INVAL) {
2186 frame->actual_length = len;
2187 td->urb->actual_length += len;
2188 }
2189 }
2190
2191 return finish_td(xhci, td, event_trb, event, ep, status, false);
2192 }
2193
2194 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2195 struct xhci_transfer_event *event,
2196 struct xhci_virt_ep *ep, int *status)
2197 {
2198 struct xhci_ring *ep_ring;
2199 struct urb_priv *urb_priv;
2200 struct usb_iso_packet_descriptor *frame;
2201 int idx;
2202
2203 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2204 urb_priv = td->urb->hcpriv;
2205 idx = urb_priv->td_cnt;
2206 frame = &td->urb->iso_frame_desc[idx];
2207
2208 /* The transfer is partly done. */
2209 frame->status = -EXDEV;
2210
2211 /* calc actual length */
2212 frame->actual_length = 0;
2213
2214 /* Update ring dequeue pointer */
2215 while (ep_ring->dequeue != td->last_trb)
2216 inc_deq(xhci, ep_ring);
2217 inc_deq(xhci, ep_ring);
2218
2219 return finish_td(xhci, td, NULL, event, ep, status, true);
2220 }
2221
2222 /*
2223 * Process bulk and interrupt tds, update urb status and actual_length.
2224 */
2225 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2226 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2227 struct xhci_virt_ep *ep, int *status)
2228 {
2229 struct xhci_ring *ep_ring;
2230 union xhci_trb *cur_trb;
2231 struct xhci_segment *cur_seg;
2232 u32 trb_comp_code;
2233
2234 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2235 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2236
2237 switch (trb_comp_code) {
2238 case COMP_SUCCESS:
2239 /* Double check that the HW transferred everything. */
2240 if (event_trb != td->last_trb ||
2241 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2242 xhci_warn(xhci, "WARN Successful completion "
2243 "on short TX\n");
2244 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2245 *status = -EREMOTEIO;
2246 else
2247 *status = 0;
2248 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2249 trb_comp_code = COMP_SHORT_TX;
2250 } else {
2251 *status = 0;
2252 }
2253 break;
2254 case COMP_SHORT_TX:
2255 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2256 *status = -EREMOTEIO;
2257 else
2258 *status = 0;
2259 break;
2260 default:
2261 /* Others already handled above */
2262 break;
2263 }
2264 if (trb_comp_code == COMP_SHORT_TX)
2265 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2266 "%d bytes untransferred\n",
2267 td->urb->ep->desc.bEndpointAddress,
2268 td->urb->transfer_buffer_length,
2269 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2270 /* Fast path - was this the last TRB in the TD for this URB? */
2271 if (event_trb == td->last_trb) {
2272 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2273 td->urb->actual_length =
2274 td->urb->transfer_buffer_length -
2275 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2276 if (td->urb->transfer_buffer_length <
2277 td->urb->actual_length) {
2278 xhci_warn(xhci, "HC gave bad length "
2279 "of %d bytes left\n",
2280 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2281 td->urb->actual_length = 0;
2282 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2283 *status = -EREMOTEIO;
2284 else
2285 *status = 0;
2286 }
2287 /* Don't overwrite a previously set error code */
2288 if (*status == -EINPROGRESS) {
2289 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2290 *status = -EREMOTEIO;
2291 else
2292 *status = 0;
2293 }
2294 } else {
2295 td->urb->actual_length =
2296 td->urb->transfer_buffer_length;
2297 /* Ignore a short packet completion if the
2298 * untransferred length was zero.
2299 */
2300 if (*status == -EREMOTEIO)
2301 *status = 0;
2302 }
2303 } else {
2304 /* Slow path - walk the list, starting from the dequeue
2305 * pointer, to get the actual length transferred.
2306 */
2307 td->urb->actual_length = 0;
2308 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2309 cur_trb != event_trb;
2310 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2311 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2312 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2313 td->urb->actual_length +=
2314 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2315 }
2316 /* If the ring didn't stop on a Link or No-op TRB, add
2317 * in the actual bytes transferred from the Normal TRB
2318 */
2319 if (trb_comp_code != COMP_STOP_INVAL)
2320 td->urb->actual_length +=
2321 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2322 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2323 }
2324
2325 return finish_td(xhci, td, event_trb, event, ep, status, false);
2326 }
2327
2328 /*
2329 * If this function returns an error condition, it means it got a Transfer
2330 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2331 * At this point, the host controller is probably hosed and should be reset.
2332 */
2333 static int handle_tx_event(struct xhci_hcd *xhci,
2334 struct xhci_transfer_event *event)
2335 __releases(&xhci->lock)
2336 __acquires(&xhci->lock)
2337 {
2338 struct xhci_virt_device *xdev;
2339 struct xhci_virt_ep *ep;
2340 struct xhci_ring *ep_ring;
2341 unsigned int slot_id;
2342 int ep_index;
2343 struct xhci_td *td = NULL;
2344 dma_addr_t event_dma;
2345 struct xhci_segment *event_seg;
2346 union xhci_trb *event_trb;
2347 struct urb *urb = NULL;
2348 int status = -EINPROGRESS;
2349 struct urb_priv *urb_priv;
2350 struct xhci_ep_ctx *ep_ctx;
2351 struct list_head *tmp;
2352 u32 trb_comp_code;
2353 int ret = 0;
2354 int td_num = 0;
2355
2356 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2357 xdev = xhci->devs[slot_id];
2358 if (!xdev) {
2359 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
2360 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2361 (unsigned long long) xhci_trb_virt_to_dma(
2362 xhci->event_ring->deq_seg,
2363 xhci->event_ring->dequeue),
2364 lower_32_bits(le64_to_cpu(event->buffer)),
2365 upper_32_bits(le64_to_cpu(event->buffer)),
2366 le32_to_cpu(event->transfer_len),
2367 le32_to_cpu(event->flags));
2368 xhci_dbg(xhci, "Event ring:\n");
2369 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2370 return -ENODEV;
2371 }
2372
2373 /* Endpoint ID is 1 based, our index is zero based */
2374 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2375 ep = &xdev->eps[ep_index];
2376 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2377 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2378 if (!ep_ring ||
2379 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2380 EP_STATE_DISABLED) {
2381 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2382 "or incorrect stream ring\n");
2383 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2384 (unsigned long long) xhci_trb_virt_to_dma(
2385 xhci->event_ring->deq_seg,
2386 xhci->event_ring->dequeue),
2387 lower_32_bits(le64_to_cpu(event->buffer)),
2388 upper_32_bits(le64_to_cpu(event->buffer)),
2389 le32_to_cpu(event->transfer_len),
2390 le32_to_cpu(event->flags));
2391 xhci_dbg(xhci, "Event ring:\n");
2392 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2393 return -ENODEV;
2394 }
2395
2396 /* Count the number of TDs on the ring if ep->skip is set */
2397 if (ep->skip) {
2398 list_for_each(tmp, &ep_ring->td_list)
2399 td_num++;
2400 }
2401
2402 event_dma = le64_to_cpu(event->buffer);
2403 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2404 /* Look for common error cases */
2405 switch (trb_comp_code) {
2406 /* Skip codes that require special handling depending on
2407 * transfer type
2408 */
2409 case COMP_SUCCESS:
2410 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2411 break;
2412 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2413 trb_comp_code = COMP_SHORT_TX;
2414 else
2415 xhci_warn_ratelimited(xhci,
2416 "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
2417 case COMP_SHORT_TX:
2418 break;
2419 case COMP_STOP:
2420 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2421 break;
2422 case COMP_STOP_INVAL:
2423 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2424 break;
2425 case COMP_STALL:
2426 xhci_dbg(xhci, "Stalled endpoint\n");
2427 ep->ep_state |= EP_HALTED;
2428 status = -EPIPE;
2429 break;
2430 case COMP_TRB_ERR:
2431 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2432 status = -EILSEQ;
2433 break;
2434 case COMP_SPLIT_ERR:
2435 case COMP_TX_ERR:
2436 xhci_dbg(xhci, "Transfer error on endpoint\n");
2437 status = -EPROTO;
2438 break;
2439 case COMP_BABBLE:
2440 xhci_dbg(xhci, "Babble error on endpoint\n");
2441 status = -EOVERFLOW;
2442 break;
2443 case COMP_DB_ERR:
2444 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2445 status = -ENOSR;
2446 break;
2447 case COMP_BW_OVER:
2448 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2449 break;
2450 case COMP_BUFF_OVER:
2451 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2452 break;
2453 case COMP_UNDERRUN:
2454 /*
2455 * When the Isoch ring is empty, the xHC will generate
2456 * a Ring Overrun Event for IN Isoch endpoint or Ring
2457 * Underrun Event for OUT Isoch endpoint.
2458 */
2459 xhci_dbg(xhci, "underrun event on endpoint\n");
2460 if (!list_empty(&ep_ring->td_list))
2461 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2462 "still with TDs queued?\n",
2463 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2464 ep_index);
2465 goto cleanup;
2466 case COMP_OVERRUN:
2467 xhci_dbg(xhci, "overrun event on endpoint\n");
2468 if (!list_empty(&ep_ring->td_list))
2469 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2470 "still with TDs queued?\n",
2471 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2472 ep_index);
2473 goto cleanup;
2474 case COMP_DEV_ERR:
2475 xhci_warn(xhci, "WARN: detect an incompatible device");
2476 status = -EPROTO;
2477 break;
2478 case COMP_MISSED_INT:
2479 /*
2480 * When a Missed Service Error is encountered, the xHC may have
2481 * skipped one or more isoc TDs.
2482 * Set the skip flag of the endpoint; complete the missed TDs as
2483 * short transfers the next time the ring is processed.
2484 */
2485 ep->skip = true;
2486 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2487 goto cleanup;
2488 default:
2489 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2490 status = 0;
2491 break;
2492 }
2493 xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
2494 "busted\n");
2495 goto cleanup;
2496 }
2497
2498 do {
2499 /* This TRB should be in the TD at the head of this ring's
2500 * TD list.
2501 */
2502 if (list_empty(&ep_ring->td_list)) {
2503 /*
2504 * A stopped endpoint may generate an extra completion
2505 * event if the device was suspended. Don't print
2506 * warnings.
2507 */
2508 if (!(trb_comp_code == COMP_STOP ||
2509 trb_comp_code == COMP_STOP_INVAL)) {
2510 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2511 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2512 ep_index);
2513 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2514 (le32_to_cpu(event->flags) &
2515 TRB_TYPE_BITMASK)>>10);
2516 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2517 }
2518 if (ep->skip) {
2519 ep->skip = false;
2520 xhci_dbg(xhci, "td_list is empty while skip "
2521 "flag set. Clear skip flag.\n");
2522 }
2523 ret = 0;
2524 goto cleanup;
2525 }
2526
2527 /* We've skipped all the TDs on the ep ring when ep->skip set */
2528 if (ep->skip && td_num == 0) {
2529 ep->skip = false;
2530 xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2531 "Clear skip flag.\n");
2532 ret = 0;
2533 goto cleanup;
2534 }
2535
2536 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2537 if (ep->skip)
2538 td_num--;
2539
2540 /* Is this a TRB in the currently executing TD? */
2541 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2542 td->last_trb, event_dma);
2543
2544 /*
2545 * Skip the Force Stopped Event. The event_trb (event_dma) of an
2546 * FSE is not in the current TD pointed to by ep_ring->dequeue,
2547 * because the hardware dequeue pointer is still at the previous
2548 * TRB of the current TD. That previous TRB may be a Link TRB or
2549 * the last TRB of the previous TD. The command completion
2550 * handler will take care of the rest.
2551 */
2552 if (!event_seg && (trb_comp_code == COMP_STOP ||
2553 trb_comp_code == COMP_STOP_INVAL)) {
2554 ret = 0;
2555 goto cleanup;
2556 }
2557
2558 if (!event_seg) {
2559 if (!ep->skip ||
2560 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2561 /* Some host controllers give a spurious
2562 * successful event after a short transfer.
2563 * Ignore it.
2564 */
2565 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2566 ep_ring->last_td_was_short) {
2567 ep_ring->last_td_was_short = false;
2568 ret = 0;
2569 goto cleanup;
2570 }
2571 /* HC is busted, give up! */
2572 xhci_err(xhci,
2573 "ERROR Transfer event TRB DMA ptr not "
2574 "part of current TD\n");
2575 return -ESHUTDOWN;
2576 }
2577
2578 ret = skip_isoc_td(xhci, td, event, ep, &status);
2579 goto cleanup;
2580 }
2581 if (trb_comp_code == COMP_SHORT_TX)
2582 ep_ring->last_td_was_short = true;
2583 else
2584 ep_ring->last_td_was_short = false;
2585
2586 if (ep->skip) {
2587 xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2588 ep->skip = false;
2589 }
2590
2591 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2592 sizeof(*event_trb)];
2593 /*
2594 * No-op TRB should not trigger interrupts.
2595 * If event_trb is a no-op TRB, it means the
2596 * corresponding TD has been cancelled. Just ignore
2597 * the TD.
2598 */
2599 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2600 xhci_dbg(xhci,
2601 "event_trb is a no-op TRB. Skip it\n");
2602 goto cleanup;
2603 }
2604
2605 /* Now update the urb's actual_length and give back to
2606 * the core
2607 */
2608 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2609 ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2610 &status);
2611 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2612 ret = process_isoc_td(xhci, td, event_trb, event, ep,
2613 &status);
2614 else
2615 ret = process_bulk_intr_td(xhci, td, event_trb, event,
2616 ep, &status);
2617
2618 cleanup:
2619 /*
2620 * Do not update event ring dequeue pointer if ep->skip is set.
2621 * We will roll back and continue processing the missed TDs.
2622 */
2623 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
2624 inc_deq(xhci, xhci->event_ring);
2625 }
2626
2627 if (ret) {
2628 urb = td->urb;
2629 urb_priv = urb->hcpriv;
2630 /* Leave the TD around for the reset endpoint function
2631 * to use (but only if it's not a control endpoint,
2632 * since we already queued the Set TR dequeue pointer
2633 * command for stalled control endpoints).
2634 */
2635 if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2636 (trb_comp_code != COMP_STALL &&
2637 trb_comp_code != COMP_BABBLE))
2638 xhci_urb_free_priv(xhci, urb_priv);
2639 else
2640 kfree(urb_priv);
2641
2642 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2643 if ((urb->actual_length != urb->transfer_buffer_length &&
2644 (urb->transfer_flags &
2645 URB_SHORT_NOT_OK)) ||
2646 (status != 0 &&
2647 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2648 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2649 "expected = %d, status = %d\n",
2650 urb, urb->actual_length,
2651 urb->transfer_buffer_length,
2652 status);
2653 spin_unlock(&xhci->lock);
2654 /* EHCI, UHCI, and OHCI always unconditionally set the
2655 * urb->status of an isochronous endpoint to 0.
2656 */
2657 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2658 status = 0;
2659 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2660 spin_lock(&xhci->lock);
2661 }
2662
2663 /*
2664 * If ep->skip is set, there are missed TDs on the endpoint
2665 * ring that need to be taken care of.
2666 * Process them as short transfers until we reach the TD pointed
2667 * to by the event.
2668 */
2669 } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2670
2671 return 0;
2672 }
2673
2674 /*
2675 * This function handles all OS-owned events on the event ring. It may drop
2676 * xhci->lock between event processing (e.g. to pass up port status changes).
2677 * Returns >0 for "possibly more events to process" (caller should call again),
2678 * otherwise 0 if done. In the future, a return value of <0 should indicate an error code.
2679 */
2680 static int xhci_handle_event(struct xhci_hcd *xhci)
2681 {
2682 union xhci_trb *event;
2683 int update_ptrs = 1;
2684 int ret;
2685
2686 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2687 xhci->error_bitmask |= 1 << 1;
2688 return 0;
2689 }
2690
2691 event = xhci->event_ring->dequeue;
2692 /* Does the HC or OS own the TRB? */
2693 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2694 xhci->event_ring->cycle_state) {
2695 xhci->error_bitmask |= 1 << 2;
2696 return 0;
2697 }
2698
2699 /*
2700 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2701 * speculative reads of the event's flags/data below.
2702 */
2703 rmb();
2704 /* FIXME: Handle more event types. */
2705 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2706 case TRB_TYPE(TRB_COMPLETION):
2707 handle_cmd_completion(xhci, &event->event_cmd);
2708 break;
2709 case TRB_TYPE(TRB_PORT_STATUS):
2710 handle_port_status(xhci, event);
2711 update_ptrs = 0;
2712 break;
2713 case TRB_TYPE(TRB_TRANSFER):
2714 ret = handle_tx_event(xhci, &event->trans_event);
2715 if (ret < 0)
2716 xhci->error_bitmask |= 1 << 9;
2717 else
2718 update_ptrs = 0;
2719 break;
2720 case TRB_TYPE(TRB_DEV_NOTE):
2721 handle_device_notification(xhci, event);
2722 break;
2723 default:
2724 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2725 TRB_TYPE(48))
2726 handle_vendor_event(xhci, event);
2727 else
2728 xhci->error_bitmask |= 1 << 3;
2729 }
2730 /* Any of the above functions may drop and re-acquire the lock, so check
2731 * to make sure a watchdog timer didn't mark the host as non-responsive.
2732 */
2733 if (xhci->xhc_state & XHCI_STATE_DYING) {
2734 xhci_dbg(xhci, "xHCI host dying, returning from "
2735 "event handler.\n");
2736 return 0;
2737 }
2738
2739 if (update_ptrs)
2740 /* Update SW event ring dequeue pointer */
2741 inc_deq(xhci, xhci->event_ring);
2742
2743 /* Are there more items on the event ring? Caller will call us again to
2744 * check.
2745 */
2746 return 1;
2747 }
2748
2749 /*
2750 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2751 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2752 * indicators of an event TRB error, but we check the status *first* to be safe.
2753 */
2754 irqreturn_t xhci_irq(struct usb_hcd *hcd)
2755 {
2756 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2757 u32 status;
2758 u64 temp_64;
2759 union xhci_trb *event_ring_deq;
2760 dma_addr_t deq;
2761
2762 spin_lock(&xhci->lock);
2763 /* Check if the xHC generated the interrupt, or the irq is shared */
2764 status = xhci_readl(xhci, &xhci->op_regs->status);
2765 if (status == 0xffffffff)
2766 goto hw_died;
2767
2768 if (!(status & STS_EINT)) {
2769 spin_unlock(&xhci->lock);
2770 return IRQ_NONE;
2771 }
2772 if (status & STS_FATAL) {
2773 xhci_warn(xhci, "WARNING: Host System Error\n");
2774 xhci_halt(xhci);
2775 hw_died:
2776 spin_unlock(&xhci->lock);
2777 return -ESHUTDOWN;
2778 }
2779
2780 /*
2781 * Clear the op reg interrupt status first,
2782 * so we can receive interrupts from other MSI-X interrupters.
2783 * Write 1 to clear the interrupt status.
2784 */
2785 status |= STS_EINT;
2786 xhci_writel(xhci, status, &xhci->op_regs->status);
2787 /* FIXME when MSI-X is supported and there are multiple vectors */
2788 /* Clear the MSI-X event interrupt status */
2789
2790 if (hcd->irq) {
2791 u32 irq_pending;
2792 /* Acknowledge the PCI interrupt */
2793 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2794 irq_pending |= IMAN_IP;
2795 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2796 }
2797
2798 if (xhci->xhc_state & XHCI_STATE_DYING) {
2799 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2800 "Shouldn't IRQs be disabled?\n");
2801 /* Clear the event handler busy flag (RW1C);
2802 * the event ring should be empty.
2803 */
2804 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2805 xhci_write_64(xhci, temp_64 | ERST_EHB,
2806 &xhci->ir_set->erst_dequeue);
2807 spin_unlock(&xhci->lock);
2808
2809 return IRQ_HANDLED;
2810 }
2811
2812 event_ring_deq = xhci->event_ring->dequeue;
2813 /* FIXME this should be a delayed service routine
2814 * that clears the EHB.
2815 */
2816 while (xhci_handle_event(xhci) > 0) {}
2817
2818 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2819 /* If necessary, update the HW's version of the event ring deq ptr. */
2820 if (event_ring_deq != xhci->event_ring->dequeue) {
2821 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2822 xhci->event_ring->dequeue);
2823 if (deq == 0)
2824 xhci_warn(xhci, "WARN something wrong with SW event "
2825 "ring dequeue ptr.\n");
2826 /* Update HC event ring dequeue pointer */
2827 temp_64 &= ERST_PTR_MASK;
2828 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2829 }
2830
2831 /* Clear the event handler busy flag (RW1C); event ring is empty. */
2832 temp_64 |= ERST_EHB;
2833 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2834
2835 spin_unlock(&xhci->lock);
2836
2837 return IRQ_HANDLED;
2838 }
2839
2840 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
2841 {
2842 return xhci_irq(hcd);
2843 }
2844
2845 /**** Endpoint Ring Operations ****/
2846
2847 /*
2848 * Generic function for queueing a TRB on a ring.
2849 * The caller must have checked to make sure there's room on the ring.
2850 *
2851 * @more_trbs_coming: Will you enqueue more TRBs before calling
2852 * prepare_transfer()?
2853 */
2854 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2855 bool more_trbs_coming,
2856 u32 field1, u32 field2, u32 field3, u32 field4)
2857 {
2858 struct xhci_generic_trb *trb;
2859
2860 trb = &ring->enqueue->generic;
2861 trb->field[0] = cpu_to_le32(field1);
2862 trb->field[1] = cpu_to_le32(field2);
2863 trb->field[2] = cpu_to_le32(field3);
2864 trb->field[3] = cpu_to_le32(field4);
2865 inc_enq(xhci, ring, more_trbs_coming);
2866 }
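
/*
 * Illustrative sketch only (not a call site in this file): once room on
 * the command ring has been ensured, a command no-op TRB could be
 * queued as
 *
 *	queue_trb(xhci, xhci->cmd_ring, false, 0, 0, 0,
 *		  TRB_TYPE(TRB_CMD_NOOP) | xhci->cmd_ring->cycle_state);
 *
 * field4 carries the TRB type plus the ring's current cycle state, and
 * writing that cycle bit is what hands the TRB over to the hardware.
 */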
2867
2868 /*
2869 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2870 * FIXME allocate segments if the ring is full.
2871 */
2872 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2873 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2874 {
2875 unsigned int num_trbs_needed;
2876
2877 /* Make sure the endpoint has been added to xHC schedule */
2878 switch (ep_state) {
2879 case EP_STATE_DISABLED:
2880 /*
2881 * USB core changed config/interfaces without notifying us,
2882 * or hardware is reporting the wrong state.
2883 */
2884 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2885 return -ENOENT;
2886 case EP_STATE_ERROR:
2887 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2888 /* FIXME event handling code for error needs to clear it */
2889 /* XXX not sure if this should be -ENOENT or not */
2890 return -EINVAL;
2891 case EP_STATE_HALTED:
2892 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2893 case EP_STATE_STOPPED:
2894 case EP_STATE_RUNNING:
2895 break;
2896 default:
2897 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2898 /*
2899 * FIXME issue Configure Endpoint command to try to get the HC
2900 * back into a known state.
2901 */
2902 return -EINVAL;
2903 }
2904
2905 while (1) {
2906 if (room_on_ring(xhci, ep_ring, num_trbs))
2907 break;
2908
2909 if (ep_ring == xhci->cmd_ring) {
2910 xhci_err(xhci, "Do not support expand command ring\n");
2911 return -ENOMEM;
2912 }
2913
2914 xhci_dbg(xhci, "ERROR no room on ep ring, "
2915 "try ring expansion\n");
2916 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2917 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2918 mem_flags)) {
2919 xhci_err(xhci, "Ring expansion failed\n");
2920 return -ENOMEM;
2921 }
2922 }
2923
2924 if (enqueue_is_link_trb(ep_ring)) {
2925 struct xhci_ring *ring = ep_ring;
2926 union xhci_trb *next;
2927
2928 next = ring->enqueue;
2929
2930 while (last_trb(xhci, ring, ring->enq_seg, next)) {
2931 /* If we're not dealing with 0.95 hardware or isoc rings
2932 * on AMD 0.96 host, clear the chain bit.
2933 */
2934 #ifndef CONFIG_MTK_XHCI
2935 if (!xhci_link_trb_quirk(xhci) &&
2936 !(ring->type == TYPE_ISOC &&
2937 (xhci->quirks & XHCI_AMD_0x96_HOST)))
2938 next->link.control &= cpu_to_le32(~TRB_CHAIN);
2939 else
2940 next->link.control |= cpu_to_le32(TRB_CHAIN);
2941 #else
2942 next->link.control &= cpu_to_le32(~TRB_CHAIN);
2943 #endif
2944 wmb();
2945 next->link.control ^= cpu_to_le32(TRB_CYCLE);
2946
2947 /* Toggle the cycle bit after the last ring segment. */
2948 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2949 ring->cycle_state = (ring->cycle_state ? 0 : 1);
2950 }
2951 ring->enq_seg = ring->enq_seg->next;
2952 ring->enqueue = ring->enq_seg->trbs;
2953 next = ring->enqueue;
2954 }
2955 }
2956
2957 return 0;
2958 }
2959
2960 static int prepare_transfer(struct xhci_hcd *xhci,
2961 struct xhci_virt_device *xdev,
2962 unsigned int ep_index,
2963 unsigned int stream_id,
2964 unsigned int num_trbs,
2965 struct urb *urb,
2966 unsigned int td_index,
2967 gfp_t mem_flags)
2968 {
2969 int ret;
2970 struct urb_priv *urb_priv;
2971 struct xhci_td *td;
2972 struct xhci_ring *ep_ring;
2973 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2974
2975 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2976 if (!ep_ring) {
2977 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2978 stream_id);
2979 return -EINVAL;
2980 }
2981
2982 ret = prepare_ring(xhci, ep_ring,
2983 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2984 num_trbs, mem_flags);
2985 if (ret)
2986 return ret;
2987
2988 urb_priv = urb->hcpriv;
2989 td = urb_priv->td[td_index];
2990
2991 INIT_LIST_HEAD(&td->td_list);
2992 INIT_LIST_HEAD(&td->cancelled_td_list);
2993
2994 if (td_index == 0) {
2995 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2996 if (unlikely(ret))
2997 return ret;
2998 }
2999
3000 td->urb = urb;
3001 /* Add this TD to the tail of the endpoint ring's TD list */
3002 list_add_tail(&td->td_list, &ep_ring->td_list);
3003 td->start_seg = ep_ring->enq_seg;
3004 td->first_trb = ep_ring->enqueue;
3005
3006 urb_priv->td[td_index] = td;
3007
3008 return 0;
3009 }
3010
3011 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
3012 {
3013 int num_sgs, num_trbs, running_total, temp, i;
3014 struct scatterlist *sg;
3015
3016 sg = NULL;
3017 num_sgs = urb->num_mapped_sgs;
3018 temp = urb->transfer_buffer_length;
3019
3020 num_trbs = 0;
3021 for_each_sg(urb->sg, sg, num_sgs, i) {
3022 unsigned int len = sg_dma_len(sg);
3023
3024 /* Scatter gather list entries may cross 64KB boundaries */
3025 running_total = TRB_MAX_BUFF_SIZE -
3026 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
3027 running_total &= TRB_MAX_BUFF_SIZE - 1;
3028 if (running_total != 0)
3029 num_trbs++;
3030
3031 /* How many more 64KB chunks to transfer, how many more TRBs? */
3032 while (running_total < sg_dma_len(sg) && running_total < temp) {
3033 num_trbs++;
3034 running_total += TRB_MAX_BUFF_SIZE;
3035 }
3036 len = min_t(int, len, temp);
3037 temp -= len;
3038 if (temp == 0)
3039 break;
3040 }
3041 return num_trbs;
3042 }
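
/*
 * Worked example for the boundary math above (hypothetical numbers): an
 * sg entry of 6000 bytes at DMA address 0xFF00 crosses the 64KB boundary
 * at 0x10000. The first TRB can cover at most 0x10000 - 0xFF00 = 256
 * bytes, so running_total starts at 256 and one more TRB covers the
 * remaining 5744 bytes: two TRBs for this entry.
 */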
3043
3044 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
3045 {
3046 if (num_trbs != 0)
3047 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
3048 "TRBs, %d left\n", __func__,
3049 urb->ep->desc.bEndpointAddress, num_trbs);
3050 if (running_total != urb->transfer_buffer_length)
3051 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3052 "queued %#x (%d), asked for %#x (%d)\n",
3053 __func__,
3054 urb->ep->desc.bEndpointAddress,
3055 running_total, running_total,
3056 urb->transfer_buffer_length,
3057 urb->transfer_buffer_length);
3058 }
3059
3060 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3061 unsigned int ep_index, unsigned int stream_id, int start_cycle,
3062 struct xhci_generic_trb *start_trb)
3063 {
3064 /*
3065 * Pass all the TRBs to the hardware at once and make sure this write
3066 * isn't reordered.
3067 */
3068 wmb();
3069 if (start_cycle)
3070 start_trb->field[3] |= cpu_to_le32(start_cycle);
3071 else
3072 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3073 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3074 }
3075
3076 /*
3077 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3078 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3079 * (comprised of sg list entries) can take several service intervals to
3080 * transmit.
3081 */
3082 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3083 struct urb *urb, int slot_id, unsigned int ep_index)
3084 {
3085 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
3086 xhci->devs[slot_id]->out_ctx, ep_index);
3087 int xhci_interval;
3088 int ep_interval;
3089
3090 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3091 ep_interval = urb->interval;
3092 /* Convert to microframes */
3093 if (urb->dev->speed == USB_SPEED_LOW ||
3094 urb->dev->speed == USB_SPEED_FULL)
3095 ep_interval *= 8;
3096 /* FIXME change this to a warning and a suggestion to use the new API
3097 * to set the polling interval (once the API is added).
3098 */
3099 if (xhci_interval != ep_interval) {
3100 if (printk_ratelimit())
3101 dev_dbg(&urb->dev->dev, "Driver uses different interval"
3102 " (%d microframe%s) than xHCI "
3103 "(%d microframe%s)\n",
3104 ep_interval,
3105 ep_interval == 1 ? "" : "s",
3106 xhci_interval,
3107 xhci_interval == 1 ? "" : "s");
3108 urb->interval = xhci_interval;
3109 /* Convert back to frames for LS/FS devices */
3110 if (urb->dev->speed == USB_SPEED_LOW ||
3111 urb->dev->speed == USB_SPEED_FULL)
3112 urb->interval /= 8;
3113 }
3114 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3115 }
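
/*
 * Hedged interval example for xhci_queue_intr_tx() above (illustrative
 * values): a full-speed device asking for urb->interval = 4 frames is
 * compared as 4 * 8 = 32 microframes; if the endpoint context holds,
 * say, 64 microframes, urb->interval becomes 64 and is converted back
 * to 8 frames for the LS/FS device.
 */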
3116
3117 /*
3118 * The TD size is the number of bytes remaining in the TD (including this TRB),
3119 * right shifted by 10.
3120 * It must fit in bits 21:17, so it can't be bigger than 31.
3121 */
3122 #ifdef CONFIG_MTK_XHCI
3123 static u32 xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total,
3124 unsigned int maxp, unsigned int trb_buffer_length)
3125 {
3126 u32 max = 31;
3127 int remainder, td_packet_count, packet_transferred;
3128
3129 /* 0 for the last TRB */
3130 /* FIXME: need a workaround if there is a ZLP in this TD */
3131 if (td_running_total + trb_buffer_length == td_transfer_size)
3132 return 0;
3133
3134 /* FIXME: need to take care of high-bandwidth (MAX_ESIT) */
3135 packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp;
3136 td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
3137 remainder = td_packet_count - packet_transferred;
3138
3139 if (remainder > max)
3140 return max << 17;
3141 else
3142 return remainder << 17;
3143 }
3144 #else
3145 static u32 xhci_td_remainder(unsigned int remainder)
3146 {
3147 u32 max = (1 << (21 - 17 + 1)) - 1;
3148
3149 if ((remainder >> 10) >= max)
3150 return max << 17;
3151 else
3152 return (remainder >> 10) << 17;
3153 }
3154 #endif
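
/*
 * Worked example for the pre-1.0 xhci_td_remainder() above (illustrative
 * numbers): with 20480 bytes left in the TD, the TD size field is
 * (20480 >> 10) << 17 = 20 << 17; any remainder of 31744 bytes
 * (31 << 10) or more is clamped to 31 << 17.
 */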
3155
3156
3157 #ifndef CONFIG_MTK_XHCI
3158 /*
3159 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3160 * packets remaining in the TD (*not* including this TRB).
3161 *
3162 * Total TD packet count = total_packet_count =
3163 * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
3164 *
3165 * Packets transferred up to and including this TRB = packets_transferred =
3166 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3167 *
3168 * TD size = total_packet_count - packets_transferred
3169 *
3170 * It must fit in bits 21:17, so it can't be bigger than 31.
3171 * The last TRB in a TD must have the TD size set to zero.
3172 */
3173 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
3174 unsigned int total_packet_count, struct urb *urb,
3175 unsigned int num_trbs_left)
3176 {
3177 int packets_transferred;
3178
3179 /* One TRB with a zero-length data packet. */
3180 if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
3181 return 0;
3182
3183 /* All the TRB queueing functions don't count the current TRB in
3184 * running_total.
3185 */
3186 packets_transferred = (running_total + trb_buff_len) /
3187 GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3188
3189 if ((total_packet_count - packets_transferred) > 31)
3190 return 31 << 17;
3191 return (total_packet_count - packets_transferred) << 17;
3192 }
3193 #endif
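
/*
 * Worked example for xhci_v1_0_td_remainder() above (illustrative
 * numbers): a 3072-byte TD on a 512-byte endpoint gives
 * total_packet_count = 6. After the first 1024-byte TRB
 * (running_total = 0, trb_buff_len = 1024), packets_transferred = 2,
 * so the TD size field is (6 - 2) << 17.
 */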
3194
3195 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3196 struct urb *urb, int slot_id, unsigned int ep_index)
3197 {
3198 struct xhci_ring *ep_ring;
3199 unsigned int num_trbs;
3200 struct urb_priv *urb_priv;
3201 struct xhci_td *td;
3202 struct scatterlist *sg;
3203 int num_sgs;
3204 int trb_buff_len, this_sg_len, running_total;
3205 unsigned int total_packet_count;
3206 bool first_trb;
3207 u64 addr;
3208 bool more_trbs_coming;
3209
3210 struct xhci_generic_trb *start_trb;
3211 int start_cycle;
3212
3213 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3214 if (!ep_ring)
3215 return -EINVAL;
3216
3217 num_trbs = count_sg_trbs_needed(xhci, urb);
3218 num_sgs = urb->num_mapped_sgs;
3219 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3220 usb_endpoint_maxp(&urb->ep->desc));
3221
3222 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
3223 ep_index, urb->stream_id,
3224 num_trbs, urb, 0, mem_flags);
3225 if (trb_buff_len < 0)
3226 return trb_buff_len;
3227
3228 urb_priv = urb->hcpriv;
3229 td = urb_priv->td[0];
3230
3231 /*
3232 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3233 * until we've finished creating all the other TRBs. The ring's cycle
3234 * state may change as we enqueue the other TRBs, so save it too.
3235 */
3236 start_trb = &ep_ring->enqueue->generic;
3237 start_cycle = ep_ring->cycle_state;
3238
3239 running_total = 0;
3240 /*
3241 * How much data is in the first TRB?
3242 *
3243 * There are three forces at work for TRB buffer pointers and lengths:
3244 * 1. We don't want to walk off the end of this sg-list entry buffer.
3245 * 2. The transfer length that the driver requested may be smaller than
3246 * the amount of memory allocated for this scatter-gather list.
3247 * 3. TRBs buffers can't cross 64KB boundaries.
3248 */
3249 sg = urb->sg;
3250 addr = (u64) sg_dma_address(sg);
3251 this_sg_len = sg_dma_len(sg);
3252 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
3253 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3254 if (trb_buff_len > urb->transfer_buffer_length)
3255 trb_buff_len = urb->transfer_buffer_length;
3256
3257 first_trb = true;
3258 /* Queue the first TRB, even if it's zero-length */
3259 do {
3260 u32 field = 0;
3261 u32 length_field = 0;
3262 u32 remainder = 0;
3263
3264 /* Don't change the cycle bit of the first TRB until later */
3265 if (first_trb) {
3266 first_trb = false;
3267 if (start_cycle == 0)
3268 field |= 0x1;
3269 } else
3270 field |= ep_ring->cycle_state;
3271
3272 /* Chain all the TRBs together; clear the chain bit in the last
3273 * TRB to indicate it's the last TRB in the chain.
3274 */
3275 if (num_trbs > 1) {
3276 field |= TRB_CHAIN;
3277 } else {
3278 /* FIXME - add check for ZERO_PACKET flag before this */
3279 td->last_trb = ep_ring->enqueue;
3280 field |= TRB_IOC;
3281 }
3282
3283 /* Only set interrupt on short packet for IN endpoints */
3284 if (usb_urb_dir_in(urb))
3285 field |= TRB_ISP;
3286
3287 if (TRB_MAX_BUFF_SIZE -
3288 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
3289 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
3290 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
3291 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
3292 (unsigned int) addr + trb_buff_len);
3293 }
3294
3295 /* Set the TRB length, TD size, and interrupter fields. */
3296 #ifdef CONFIG_MTK_XHCI
3297 if (num_trbs > 1) {
3298 remainder = xhci_td_remainder(urb->transfer_buffer_length,
3299 running_total, usb_endpoint_maxp(&urb->ep->desc), trb_buff_len);
3300 }
3301 #else
3303 if (xhci->hci_version < 0x100) {
3304 remainder = xhci_td_remainder(
3305 urb->transfer_buffer_length -
3306 running_total);
3307 } else {
3308 remainder = xhci_v1_0_td_remainder(running_total,
3309 trb_buff_len, total_packet_count, urb,
3310 num_trbs - 1);
3311 }
3312 #endif
3313
3314 length_field = TRB_LEN(trb_buff_len) |
3315 remainder |
3316 TRB_INTR_TARGET(0);
3317
3318 if (num_trbs > 1)
3319 more_trbs_coming = true;
3320 else
3321 more_trbs_coming = false;
3322 queue_trb(xhci, ep_ring, more_trbs_coming,
3323 lower_32_bits(addr),
3324 upper_32_bits(addr),
3325 length_field,
3326 field | TRB_TYPE(TRB_NORMAL));
3327 --num_trbs;
3328 running_total += trb_buff_len;
3329
3330 /* Calculate length for next transfer --
3331 * Are we done queueing all the TRBs for this sg entry?
3332 */
3333 this_sg_len -= trb_buff_len;
3334 if (this_sg_len == 0) {
3335 --num_sgs;
3336 if (num_sgs == 0)
3337 break;
3338 sg = sg_next(sg);
3339 addr = (u64) sg_dma_address(sg);
3340 this_sg_len = sg_dma_len(sg);
3341 } else {
3342 addr += trb_buff_len;
3343 }
3344
3345 trb_buff_len = TRB_MAX_BUFF_SIZE -
3346 (addr & (TRB_MAX_BUFF_SIZE - 1));
3347 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3348 if (running_total + trb_buff_len > urb->transfer_buffer_length)
3349 trb_buff_len =
3350 urb->transfer_buffer_length - running_total;
3351 } while (running_total < urb->transfer_buffer_length);
3352
3353 check_trb_math(urb, num_trbs, running_total);
3354 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3355 start_cycle, start_trb);
3356 return 0;
3357 }
3358
3359 /* This is very similar to what ehci-q.c qtd_fill() does */
3360 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3361 struct urb *urb, int slot_id, unsigned int ep_index)
3362 {
3363 struct xhci_ring *ep_ring;
3364 struct urb_priv *urb_priv;
3365 struct xhci_td *td;
3366 int num_trbs;
3367 struct xhci_generic_trb *start_trb;
3368 bool first_trb;
3369 bool more_trbs_coming;
3370 int start_cycle;
3371 u32 field, length_field;
3372 #ifdef CONFIG_MTK_XHCI
3373 int max_packet = 0;
3374 #endif
3375 int running_total, trb_buff_len, ret;
3376 unsigned int total_packet_count;
3377 u64 addr;
3378
3379 if (urb->num_sgs)
3380 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
3381
3382 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3383 if (!ep_ring)
3384 return -EINVAL;
3385
3386 num_trbs = 0;
3387 /* How much data is (potentially) left before the 64KB boundary? */
3388 running_total = TRB_MAX_BUFF_SIZE -
3389 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3390 running_total &= TRB_MAX_BUFF_SIZE - 1;
3391
3392 /* If there's some data on this 64KB chunk, or we have to send a
3393 * zero-length transfer, we need at least one TRB
3394 */
3395 if (running_total != 0 || urb->transfer_buffer_length == 0)
3396 num_trbs++;
3397 /* How many more 64KB chunks to transfer, how many more TRBs? */
3398 while (running_total < urb->transfer_buffer_length) {
3399 num_trbs++;
3400 running_total += TRB_MAX_BUFF_SIZE;
3401 }
3402 /* FIXME: the non-MTK path doesn't deal with URB_ZERO_PACKET - need one more */
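/*
 * Worked example (hypothetical values): transfer_dma = 0x2F000 and
 * transfer_buffer_length = 12288 leave 4096 bytes before the 64KB
 * boundary (one TRB); the remaining 8192 bytes fit in one more
 * 64KB-bounded TRB, so num_trbs = 2.
 */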
3403
3404 #ifdef CONFIG_MTK_XHCI
3405 switch (urb->dev->speed) {
3406 case USB_SPEED_SUPER:
3407 max_packet = usb_endpoint_maxp(&urb->ep->desc);
3408 break;
3409 case USB_SPEED_HIGH:
3410 case USB_SPEED_FULL:
3411 case USB_SPEED_LOW:
3412 default:
3413 max_packet = usb_endpoint_maxp(&urb->ep->desc) & 0x7ff;
3414 break;
3415 }
3416 /* Reserve one more TRB for the trailing zero-length packet */
3417 if ((urb->transfer_flags & URB_ZERO_PACKET) &&
3418 ((urb->transfer_buffer_length % max_packet) == 0))
3419 num_trbs++;
3420 #endif
3421
3422 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3423 ep_index, urb->stream_id,
3424 num_trbs, urb, 0, mem_flags);
3425 if (ret < 0)
3426 return ret;
3427
3428 urb_priv = urb->hcpriv;
3429 td = urb_priv->td[0];
3430
3431 /*
3432 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3433 * until we've finished creating all the other TRBs. The ring's cycle
3434 * state may change as we enqueue the other TRBs, so save it too.
3435 */
3436 start_trb = &ep_ring->enqueue->generic;
3437 start_cycle = ep_ring->cycle_state;
3438
3439 running_total = 0;
3440 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3441 usb_endpoint_maxp(&urb->ep->desc));
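/*
 * E.g. (illustrative): an 8192-byte transfer on a 512-byte bulk
 * endpoint gives total_packet_count = DIV_ROUND_UP(8192, 512) = 16.
 */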
3442 /* How much data is in the first TRB? */
3443 addr = (u64) urb->transfer_dma;
3444 trb_buff_len = TRB_MAX_BUFF_SIZE -
3445 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3446 if (trb_buff_len > urb->transfer_buffer_length)
3447 trb_buff_len = urb->transfer_buffer_length;
3448
3449 first_trb = true;
3450
3451 /* Queue the first TRB, even if it's zero-length */
3452 do {
3453 u32 remainder = 0;
3454 field = 0;
3455
3456 /* Don't change the cycle bit of the first TRB until later */
3457 if (first_trb) {
3458 first_trb = false;
3459 if (start_cycle == 0)
3460 field |= 0x1;
3461 } else
3462 field |= ep_ring->cycle_state;
3463
3464 /* Chain all the TRBs together; clear the chain bit in the last
3465 * TRB to indicate it's the last TRB in the chain.
3466 */
3467 if (num_trbs > 1) {
3468 field |= TRB_CHAIN;
3469 } else {
3470 /* FIXME - add check for ZERO_PACKET flag before this */
3471 td->last_trb = ep_ring->enqueue;
3472 field |= TRB_IOC;
3473 }
3474
3475 /* Only set interrupt on short packet for IN endpoints */
3476 if (usb_urb_dir_in(urb))
3477 field |= TRB_ISP;
3478 #ifdef CONFIG_MTK_XHCI
3479 remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
3480 #else
3481 /* Set the TRB length, TD size, and interrupter fields. */
3482 if (xhci->hci_version < 0x100) {
3483 remainder = xhci_td_remainder(
3484 urb->transfer_buffer_length -
3485 running_total);
3486 } else {
3487 remainder = xhci_v1_0_td_remainder(running_total,
3488 trb_buff_len, total_packet_count, urb,
3489 num_trbs - 1);
3490 }
3491 #endif
3492 length_field = TRB_LEN(trb_buff_len) |
3493 remainder |
3494 TRB_INTR_TARGET(0);
3495
3496 if (num_trbs > 1)
3497 more_trbs_coming = true;
3498 else
3499 more_trbs_coming = false;
3500 queue_trb(xhci, ep_ring, more_trbs_coming,
3501 lower_32_bits(addr),
3502 upper_32_bits(addr),
3503 length_field,
3504 field | TRB_TYPE(TRB_NORMAL));
3505 --num_trbs;
3506 running_total += trb_buff_len;
3507
3508 /* Calculate length for next transfer */
3509 addr += trb_buff_len;
3510 trb_buff_len = urb->transfer_buffer_length - running_total;
3511 if (trb_buff_len > TRB_MAX_BUFF_SIZE)
3512 trb_buff_len = TRB_MAX_BUFF_SIZE;
3513 } while (running_total < urb->transfer_buffer_length);
3514
3515 check_trb_math(urb, num_trbs, running_total);
3516 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3517 start_cycle, start_trb);
3518 return 0;
3519 }
3520
3521 /* Caller must have locked xhci->lock */
3522 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3523 struct urb *urb, int slot_id, unsigned int ep_index)
3524 {
3525 struct xhci_ring *ep_ring;
3526 int num_trbs;
3527 int ret;
3528 struct usb_ctrlrequest *setup;
3529 struct xhci_generic_trb *start_trb;
3530 int start_cycle;
3531 u32 field, length_field;
3532 struct urb_priv *urb_priv;
3533 struct xhci_td *td;
3534
3535 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3536 if (!ep_ring)
3537 return -EINVAL;
3538
3539 /*
3540 * Need to copy setup packet into setup TRB, so we can't use the setup
3541 * DMA address.
3542 */
3543 if (!urb->setup_packet)
3544 return -EINVAL;
3545
3546 /* 1 TRB for setup, 1 for status */
3547 num_trbs = 2;
3548 /*
3549 * No need to check whether additional event data or normal TRBs are
3550 * required, since the data stage of a control transfer can never exceed 16MB.
3551 * XXX: can we get a buffer that crosses 64KB boundaries?
3552 */
3553 if (urb->transfer_buffer_length > 0)
3554 num_trbs++;
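/*
 * A control TD is therefore either setup + status (2 TRBs) or
 * setup + data + status (3 TRBs); e.g. a GET_DESCRIPTOR request with an
 * 18-byte data stage queues 3 TRBs.
 */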
3555 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3556 ep_index, urb->stream_id,
3557 num_trbs, urb, 0, mem_flags);
3558 if (ret < 0)
3559 return ret;
3560
3561 urb_priv = urb->hcpriv;
3562 td = urb_priv->td[0];
3563
3564 /*
3565 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3566 * until we've finished creating all the other TRBs. The ring's cycle
3567 * state may change as we enqueue the other TRBs, so save it too.
3568 */
3569 start_trb = &ep_ring->enqueue->generic;
3570 start_cycle = ep_ring->cycle_state;
3571
3572 /* Queue setup TRB - see section 6.4.1.2.1 */
3573 /* FIXME better way to translate setup_packet into two u32 fields? */
3574 setup = (struct usb_ctrlrequest *) urb->setup_packet;
3575 field = 0;
3576 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3577 if (start_cycle == 0)
3578 field |= 0x1;
3579
3580 /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
3581 #ifdef CONFIG_MTK_XHCI
3582 if (1) { /* MTK host: always apply the Transfer Type field */
3583 #else
3584 if (xhci->hci_version == 0x100) {
3585 #endif
3586 if (urb->transfer_buffer_length > 0) {
3587 if (setup->bRequestType & USB_DIR_IN)
3588 field |= TRB_TX_TYPE(TRB_DATA_IN);
3589 else
3590 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3591 }
3592 }
3593
3594 queue_trb(xhci, ep_ring, true,
3595 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3596 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3597 TRB_LEN(8) | TRB_INTR_TARGET(0),
3598 /* Immediate data in pointer */
3599 field);
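/*
 * With TRB_IDT set, the 8-byte SETUP packet rides in the TRB's parameter
 * dwords themselves, which is why the setup DMA address is never used.
 */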
3600
3601 /* If there's data, queue data TRBs */
3602 /* Only set interrupt on short packet for IN endpoints */
3603 if (usb_urb_dir_in(urb))
3604 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3605 else
3606 field = TRB_TYPE(TRB_DATA);
3607
3608 length_field = TRB_LEN(urb->transfer_buffer_length) |
3609 #ifdef CONFIG_MTK_XHCI
3610 /* MTK: no scatter-gather for control transfers, so no TD remainder */
3611 0 |
3612 #else
3613 xhci_td_remainder(urb->transfer_buffer_length) |
3614 #endif
3615 TRB_INTR_TARGET(0);
3616 if (urb->transfer_buffer_length > 0) {
3617 if (setup->bRequestType & USB_DIR_IN)
3618 field |= TRB_DIR_IN;
3619 queue_trb(xhci, ep_ring, true,
3620 lower_32_bits(urb->transfer_dma),
3621 upper_32_bits(urb->transfer_dma),
3622 length_field,
3623 field | ep_ring->cycle_state);
3624 }
3625
3626 /* Save the DMA address of the last TRB in the TD */
3627 td->last_trb = ep_ring->enqueue;
3628
3629 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3630 /* If the device sent data, the status stage is an OUT transfer */
3631 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3632 field = 0;
3633 else
3634 field = TRB_DIR_IN;
3635 queue_trb(xhci, ep_ring, false,
3636 0,
3637 0,
3638 TRB_INTR_TARGET(0),
3639 /* Event on completion */
3640 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3641
3642 giveback_first_trb(xhci, slot_id, ep_index, 0,
3643 start_cycle, start_trb);
3644 return 0;
3645 }
3646
3647 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3648 struct urb *urb, int i)
3649 {
3650 int num_trbs = 0;
3651 u64 addr, td_len;
3652
3653 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3654 td_len = urb->iso_frame_desc[i].length;
3655
3656 num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3657 TRB_MAX_BUFF_SIZE);
3658 if (num_trbs == 0)
3659 num_trbs++;
3660
3661 return num_trbs;
3662 }
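/*
 * Illustrative example: with td_len = 1024 and a buffer starting 0xFF00
 * bytes into a 64KB region, the TD spans a 64KB boundary, so
 * DIV_ROUND_UP(1024 + 0xFF00, 64KB) = 2 TRBs; a zero-length frame still
 * consumes one TRB.
 */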
3663
3664 /*
3665 * The transfer burst count field of the isochronous TRB defines the number of
3666 * bursts that are required to move all packets in this TD. Only SuperSpeed
3667 * devices can burst up to bMaxBurst number of packets per service interval.
3668 * This field is zero based, meaning a value of zero in the field means one
3669 * burst. Basically, for everything but SuperSpeed devices, this field will be
3670 * zero. Only xHCI 1.0 host controllers support this field.
3671 */
3672 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3673 struct usb_device *udev,
3674 struct urb *urb, unsigned int total_packet_count)
3675 {
3676 unsigned int max_burst;
3677
3678 if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3679 return 0;
3680
3681 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3682 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3683 }
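/*
 * Illustrative example: 11 packets on a SuperSpeed endpoint with
 * bMaxBurst = 3 (up to 4 packets per burst) require
 * DIV_ROUND_UP(11, 4) = 3 bursts, returned zero-based as 2.
 */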
3684
3685 /*
3686 * Returns the number of packets in the last "burst" of packets. This field is
3687 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3688 * the last burst packet count is equal to the total number of packets in the
3689 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3690 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3691 * contain 1 to (bMaxBurst + 1) packets.
3692 */
3693 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3694 struct usb_device *udev,
3695 struct urb *urb, unsigned int total_packet_count)
3696 {
3697 unsigned int max_burst;
3698 unsigned int residue;
3699
3700 if (xhci->hci_version < 0x100)
3701 return 0;
3702
3703 switch (udev->speed) {
3704 case USB_SPEED_SUPER:
3705 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3706 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3707 residue = total_packet_count % (max_burst + 1);
3708 /* If residue is zero, the last burst contains (max_burst + 1)
3709 * number of packets, but the TLBPC field is zero-based.
3710 */
3711 if (residue == 0)
3712 return max_burst;
3713 return residue - 1;
3714 default:
3715 if (total_packet_count == 0)
3716 return 0;
3717 return total_packet_count - 1;
3718 }
3719 }
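/*
 * Continuing the illustrative example above: 11 packets with
 * bMaxBurst = 3 leave 11 % 4 = 3 packets in the final burst, returned
 * zero-based as 2.
 */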
3720
3721 /* Queue the TRBs for an isochronous transfer */
3722 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3723 struct urb *urb, int slot_id, unsigned int ep_index)
3724 {
3725 struct xhci_ring *ep_ring;
3726 struct urb_priv *urb_priv;
3727 struct xhci_td *td;
3728 int num_tds, trbs_per_td;
3729 struct xhci_generic_trb *start_trb;
3730 bool first_trb;
3731 int start_cycle;
3732 u32 field, length_field;
3733 int running_total, trb_buff_len, td_len, td_remain_len, ret;
3734 u64 start_addr, addr;
3735 int i, j;
3736 bool more_trbs_coming;
3737 #ifdef CONFIG_MTK_XHCI
3738 int max_packet = 0;
3739 #endif
3740
3741 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3742
3743 num_tds = urb->number_of_packets;
3744 if (num_tds < 1) {
3745 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3746 return -EINVAL;
3747 }
3748
3749 start_addr = (u64) urb->transfer_dma;
3750 start_trb = &ep_ring->enqueue->generic;
3751 start_cycle = ep_ring->cycle_state;
3752
3753 #ifdef CONFIG_MTK_XHCI
3754 switch (urb->dev->speed) {
3755 case USB_SPEED_SUPER:
3756 max_packet = usb_endpoint_maxp(&urb->ep->desc);
3757 break;
3758 case USB_SPEED_HIGH:
3759 case USB_SPEED_FULL:
3760 case USB_SPEED_LOW:
3761 default:
3762 max_packet = usb_endpoint_maxp(&urb->ep->desc) & 0x7ff;
3763 break;
3764 }
3765 #endif
3766 urb_priv = urb->hcpriv;
3767 /* Queue the first TRB, even if it's zero-length */
3768 for (i = 0; i < num_tds; i++) {
3769 unsigned int total_packet_count;
3770 unsigned int burst_count;
3771 unsigned int residue;
3772
3773 first_trb = true;
3774 running_total = 0;
3775 addr = start_addr + urb->iso_frame_desc[i].offset;
3776 td_len = urb->iso_frame_desc[i].length;
3777 td_remain_len = td_len;
3778 total_packet_count = DIV_ROUND_UP(td_len,
3779 GET_MAX_PACKET(
3780 usb_endpoint_maxp(&urb->ep->desc)));
3781 /* A zero-length transfer still involves at least one packet. */
3782 if (total_packet_count == 0)
3783 total_packet_count++;
3784 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3785 total_packet_count);
3786 residue = xhci_get_last_burst_packet_count(xhci,
3787 urb->dev, urb, total_packet_count);
3788
3789 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3790
3791 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3792 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3793 if (ret < 0) {
3794 if (i == 0)
3795 return ret;
3796 goto cleanup;
3797 }
3798
3799 td = urb_priv->td[i];
3800 for (j = 0; j < trbs_per_td; j++) {
3801 u32 remainder = 0;
3802 field = 0;
3803
3804 if (first_trb) {
3805 field = TRB_TBC(burst_count) |
3806 TRB_TLBPC(residue);
3807 /* Queue the isoc TRB */
3808 field |= TRB_TYPE(TRB_ISOC);
3809 /* Assume URB_ISO_ASAP is set */
3810 field |= TRB_SIA;
3811 if (i == 0) {
3812 if (start_cycle == 0)
3813 field |= 0x1;
3814 } else
3815 field |= ep_ring->cycle_state;
3816 first_trb = false;
3817 } else {
3818 /* Queue other normal TRBs */
3819 field |= TRB_TYPE(TRB_NORMAL);
3820 field |= ep_ring->cycle_state;
3821 }
3822
3823 /* Only set interrupt on short packet for IN EPs */
3824 if (usb_urb_dir_in(urb))
3825 field |= TRB_ISP;
3826
3827 /* Chain all the TRBs together; clear the chain bit in
3828 * the last TRB to indicate it's the last TRB in the
3829 * chain.
3830 */
3831 if (j < trbs_per_td - 1) {
3832 field |= TRB_CHAIN;
3833 more_trbs_coming = true;
3834 } else {
3835 td->last_trb = ep_ring->enqueue;
3836 field |= TRB_IOC;
3837 if (xhci->hci_version == 0x100 &&
3838 !(xhci->quirks &
3839 XHCI_AVOID_BEI)) {
3840 /* Set BEI bit except for the last td */
3841 if (i < num_tds - 1)
3842 field |= TRB_BEI;
3843 }
3844 more_trbs_coming = false;
3845 }
3846
3847 /* Calculate TRB length */
3848 trb_buff_len = TRB_MAX_BUFF_SIZE -
3849 (addr & (TRB_MAX_BUFF_SIZE - 1));
3850 if (trb_buff_len > td_remain_len)
3851 trb_buff_len = td_remain_len;
3852
3853 /* Set the TRB length, TD size, & interrupter fields. */
3854 #ifdef CONFIG_MTK_XHCI
3855 remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
3856 #else
3857 if (xhci->hci_version < 0x100) {
3858 remainder = xhci_td_remainder(
3859 td_len - running_total);
3860 } else {
3861 remainder = xhci_v1_0_td_remainder(
3862 running_total, trb_buff_len,
3863 total_packet_count, urb,
3864 (trbs_per_td - j - 1));
3865 }
3866 #endif
3867 length_field = TRB_LEN(trb_buff_len) |
3868 remainder |
3869 TRB_INTR_TARGET(0);
3870
3871 queue_trb(xhci, ep_ring, more_trbs_coming,
3872 lower_32_bits(addr),
3873 upper_32_bits(addr),
3874 length_field,
3875 field);
3876 running_total += trb_buff_len;
3877
3878 addr += trb_buff_len;
3879 td_remain_len -= trb_buff_len;
3880 }
3881
3882 /* Check TD length */
3883 if (running_total != td_len) {
3884 xhci_err(xhci, "ISOC TD length mismatch\n");
3885 ret = -EINVAL;
3886 goto cleanup;
3887 }
3888 }
3889 #ifndef CONFIG_MTK_XHCI
3890 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3891 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3892 usb_amd_quirk_pll_disable();
3893 }
3894 #endif
3895 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3896
3897 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3898 start_cycle, start_trb);
3899 return 0;
3900 cleanup:
3901 /* Clean up a partially enqueued isoc transfer. */
3902
3903 for (i--; i >= 0; i--)
3904 list_del_init(&urb_priv->td[i]->td_list);
3905
3906 /* Use the first TD as a temporary variable to turn the TDs we've queued
3907 * into No-ops with a software-owned cycle bit. That way the hardware
3908 * won't accidentally start executing bogus TDs when we partially
3909 * overwrite them. td->first_trb and td->start_seg are already set.
3910 */
3911 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3912 /* Every TRB except the first & last will have its cycle bit flipped. */
3913 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3914
3915 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3916 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3917 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3918 ep_ring->cycle_state = start_cycle;
3919 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
3920 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3921 return ret;
3922 }
3923
3924 /*
3925 * Check transfer ring to guarantee there is enough room for the urb.
3926 * Update ISO URB start_frame and interval.
3927 * Update the interval as xhci_queue_intr_tx does. For now, just use the
3928 * xHCI frame_index to set urb->start_frame.
3929 * Always assume URB_ISO_ASAP is set, and never use urb->start_frame as input.
3930 */
3931 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3932 struct urb *urb, int slot_id, unsigned int ep_index)
3933 {
3934 struct xhci_virt_device *xdev;
3935 struct xhci_ring *ep_ring;
3936 struct xhci_ep_ctx *ep_ctx;
3937 int start_frame;
3938 int xhci_interval;
3939 int ep_interval;
3940 int num_tds, num_trbs, i;
3941 int ret;
3942
3943 xdev = xhci->devs[slot_id];
3944 ep_ring = xdev->eps[ep_index].ring;
3945 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3946
3947 num_trbs = 0;
3948 num_tds = urb->number_of_packets;
3949 for (i = 0; i < num_tds; i++)
3950 num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3951
3952 /* Check the ring to guarantee there is enough room for the whole urb.
3953 * Do not insert any td of the urb to the ring if the check failed.
3954 */
3955 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3956 num_trbs, mem_flags);
3957 if (ret)
3958 return ret;
3959
3960 start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3961 start_frame &= 0x3fff;
3962
3963 urb->start_frame = start_frame;
3964 if (urb->dev->speed == USB_SPEED_LOW ||
3965 urb->dev->speed == USB_SPEED_FULL)
3966 urb->start_frame >>= 3;
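/*
 * MFINDEX counts 125-microsecond microframes; full- and low-speed
 * devices schedule in 1-millisecond frames, hence the divide by 8.
 */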
3967
3968 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3969 ep_interval = urb->interval;
3970 /* Convert to microframes */
3971 if (urb->dev->speed == USB_SPEED_LOW ||
3972 urb->dev->speed == USB_SPEED_FULL)
3973 ep_interval *= 8;
3974 /* FIXME change this to a warning and a suggestion to use the new API
3975 * to set the polling interval (once the API is added).
3976 */
3977 if (xhci_interval != ep_interval) {
3978 if (printk_ratelimit())
3979 dev_dbg(&urb->dev->dev, "Driver uses different interval"
3980 " (%d microframe%s) than xHCI "
3981 "(%d microframe%s)\n",
3982 ep_interval,
3983 ep_interval == 1 ? "" : "s",
3984 xhci_interval,
3985 xhci_interval == 1 ? "" : "s");
3986 urb->interval = xhci_interval;
3987 /* Convert back to frames for LS/FS devices */
3988 if (urb->dev->speed == USB_SPEED_LOW ||
3989 urb->dev->speed == USB_SPEED_FULL)
3990 urb->interval /= 8;
3991 }
3992 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3993
3994 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
3995 }
3996
3997 /**** Command Ring Operations ****/
3998
3999 /* Generic function for queueing a command TRB on the command ring.
4000 * Check to make sure there's room on the command ring for one command TRB.
4001 * Also check that there's room reserved for commands that must not fail.
4002 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
4003 * then only check for the number of reserved spots.
4004 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
4005 * because the command event handler may want to resubmit a failed command.
4006 */
4007 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
4008 u32 field3, u32 field4, bool command_must_succeed)
4009 {
4010 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4011 int ret;
4012
4013 if (!command_must_succeed)
4014 reserved_trbs++;
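/*
 * A normal command asks prepare_ring for one slot beyond the reserved
 * ones, so it can never consume the reserve; a must-succeed command
 * requests only reserved_trbs and may therefore dip into it.
 */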
4015
4016 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4017 reserved_trbs, GFP_ATOMIC);
4018 if (ret < 0) {
4019 xhci_err(xhci, "ERR: No room for command on command ring\n");
4020 if (command_must_succeed)
4021 xhci_err(xhci, "ERR: Reserved TRB counting for "
4022 "unfailable commands failed.\n");
4023 return ret;
4024 }
4025 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4026 field4 | xhci->cmd_ring->cycle_state);
4027 return 0;
4028 }
4029
4030 /* Queue a slot enable or disable request on the command ring */
4031 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
4032 {
4033 return queue_command(xhci, 0, 0, 0,
4034 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4035 }
4036
4037 /* Queue an address device command TRB */
4038 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
4039 u32 slot_id)
4040 {
4041 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
4042 upper_32_bits(in_ctx_ptr), 0,
4043 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
4044 false);
4045 }
4046
4047 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
4048 u32 field1, u32 field2, u32 field3, u32 field4)
4049 {
4050 return queue_command(xhci, field1, field2, field3, field4, false);
4051 }
4052
4053 /* Queue a reset device command TRB */
4054 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
4055 {
4056 return queue_command(xhci, 0, 0, 0,
4057 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4058 false);
4059 }
4060
4061 /* Queue a configure endpoint command TRB */
4062 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
4063 u32 slot_id, bool command_must_succeed)
4064 {
4065 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
4066 upper_32_bits(in_ctx_ptr), 0,
4067 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4068 command_must_succeed);
4069 }
4070
4071 /* Queue an evaluate context command TRB */
4072 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
4073 u32 slot_id, bool command_must_succeed)
4074 {
4075 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
4076 upper_32_bits(in_ctx_ptr), 0,
4077 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4078 command_must_succeed);
4079 }
4080
4081 /*
4082 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
4083 * activity on an endpoint that is about to be suspended.
4084 */
4085 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
4086 unsigned int ep_index, int suspend)
4087 {
4088 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4089 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4090 u32 type = TRB_TYPE(TRB_STOP_RING);
4091 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4092
4093 return queue_command(xhci, 0, 0, 0,
4094 trb_slot_id | trb_ep_index | type | trb_suspend, false);
4095 }
4096
4097 /* Set Transfer Ring Dequeue Pointer command.
4098 * This should not be used for endpoints that have streams enabled.
4099 */
4100 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
4101 unsigned int ep_index, unsigned int stream_id,
4102 struct xhci_segment *deq_seg,
4103 union xhci_trb *deq_ptr, u32 cycle_state)
4104 {
4105 dma_addr_t addr;
4106 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4107 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4108 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
4109 u32 type = TRB_TYPE(TRB_SET_DEQ);
4110 struct xhci_virt_ep *ep;
4111
4112 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
4113 if (addr == 0) {
4114 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4115 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
4116 deq_seg, deq_ptr);
4117 return 0;
4118 }
4119 ep = &xhci->devs[slot_id]->eps[ep_index];
4120 if ((ep->ep_state & SET_DEQ_PENDING)) {
4121 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4122 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
4123 return 0;
4124 }
4125 ep->queued_deq_seg = deq_seg;
4126 ep->queued_deq_ptr = deq_ptr;
4127 return queue_command(xhci, lower_32_bits(addr) | cycle_state,
4128 upper_32_bits(addr), trb_stream_id,
4129 trb_slot_id | trb_ep_index | type, false);
4130 }
4131
4132 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
4133 unsigned int ep_index)
4134 {
4135 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4136 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4137 u32 type = TRB_TYPE(TRB_RESET_EP);
4138
4139 return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
4140 false);
4141 }