usb/xhci: move xhci_gen_setup() away from -pci.
drivers/usb/host/xhci.c
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  "usec" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
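
/*
 * Example of a typical caller (illustrative, drawn from xhci_halt()
 * below): wait for the HCHalted status bit to be set, with -ENODEV
 * returned if the register reads back as all-ones:
 *
 *	ret = handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */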

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
			0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * free all requested IRQs
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   based on the number of interrupters in the xhci HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one extra
	 *   vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
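	/*
	 * Illustrative example (editor's note, not in the original
	 * source): on a 4-core system with HCS_MAX_INTRS(hcs_params1)
	 * equal to 8, this yields msix_count = min(4 + 1, 8) = 5 vectors.
	 */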

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

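/*
 * Interrupt setup order, as implemented below: try MSI-X first, fall
 * back to plain MSI if that fails, and finally fall back to the legacy
 * shared INTx line if neither MSI mode can be enabled.
 */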
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		return 0;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is -1, we have MSI */
		return 0;

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}
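
/*
 * Editor's note: the link_quirk handling above ties the module parameter
 * declared at the top of this file to 0.95-spec hosts (hci_version ==
 * 0x95); e.g. loading the driver with link_quirk=1 (the exact module
 * name depends on the build) keeps chain bits set on Link TRBs for
 * such hosts.
 */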

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
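	/*
	 * Editor's note (assumption, not in the original source): the
	 * interrupt moderation interval field counts in 250ns units, so
	 * writing 160 below requests roughly 160 * 250ns = 40us between
	 * interrupts.
	 */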
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				xhci->cmd_ring->dequeue) &
		(u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
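
/*
 * Editor's note: CMD_RING_RSVD_BITS masks the low-order control/reserved
 * bits of the command ring register, which the write above preserves;
 * this is why the dequeue pointer must be 64-byte aligned, as explained
 * in the comment before xhci_clear_command_ring() below.
 */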

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped; we assume port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
				STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (retval)
			goto failed_restart;

		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
failed_restart:
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
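
/*
 * Worked example (editor's note): for endpoint address 0x81 (ep 1 IN),
 * epnum = 1 and direction = 1, so index = (1 * 2) + 1 - 1 = 2.  For
 * endpoint address 0x02 (ep 2 OUT), index = (2 * 2) + 0 - 1 = 3.
 */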

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
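
/*
 * Worked example (editor's note): ep 1 IN (address 0x81) has endpoint
 * index 2, so its context flag is 1 << (2 + 1) = 0x8 (0b1000), which
 * matches the added_ctxs example in xhci_last_valid_endpoint() below.
 */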

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;
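	/*
	 * Editor's note: urb_priv is allocated with a trailing array of
	 * "size" TD pointers, each of which points into the contiguous
	 * block of xhci_td structures allocated above; e.g. an isoc URB
	 * with number_of_packets == 3 gets td[0..2] aimed at buffer[0..2].
	 */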

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
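
/*
 * Editor's note: stream ID 0 is reserved, so valid stream IDs run from
 * 1 to num_streams - 1; an endpoint with num_streams == 4 accepts URBs
 * with stream IDs 1, 2, and 3 only.
 */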

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are a few options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
			cpu_to_le32(EP_STATE_DISABLED)) ||
			le32_to_cpu(ctrl_ctx->drop_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
1607
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
				"configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
				"context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1). The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added. Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

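/*
 * Worked example for the two counting helpers above (illustrative numbers,
 * not driver code): suppose the input control context carries
 * add_flags = 0b10110 and drop_flags = 0b00110. Shifting out the slot flag
 * (bit 0) and the EP0 flag (bit 1) leaves valid_add_flags = 0b101 and
 * valid_drop_flags = 0b001. hweight32(0b101) = 2 set bits, of which
 * hweight32(0b101 & 0b001) = 1 is a changed endpoint (dropped and re-added),
 * so only 2 - 1 = 1 genuinely new endpoint context is counted. The
 * dropped-endpoint count works out the same way from the other side.
 */
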
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes. We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes, because we can oversubscribe
 * the host in this case:
 *
 * - the first configure endpoint command drops more endpoints than it adds
 * - a second configure endpoint command that adds more endpoints is queued
 * - the first configure endpoint command fails, so the config is unchanged
 * - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
			xhci->num_active_eps);
	return 0;
}

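/*
 * Concrete numbers for the check above (assumed values, for illustration):
 * with limit_active_eps = 64 and num_active_eps = 60, a configure endpoint
 * command that adds 5 new endpoints fails the reservation (60 + 5 > 64) and
 * returns -ENOMEM, while one that adds 4 is reserved immediately, raising
 * num_active_eps to 64 before the command even completes.
 */
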
/*
 * The xHC failed the configure endpoint command for some other reason, so we
 * need to revert the resources that the failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
				num_dropped_eps,
				xhci->num_active_eps);
}

unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}

unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added. Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}

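/*
 * Illustrative arithmetic for xhci_check_ss_bw() (the constants' values here
 * are assumptions; only the shape of the check matters): if SS_BW_RESERVED
 * were 10 (percent) and SS_BW_LIMIT_IN were 25000 blocks, bw_reserved would
 * be DIV_ROUND_UP(10 * 25000, 100) = 2500 blocks, so an IN budget above
 * 25000 - 2500 = 22500 blocks makes the check fail with -ENOMEM. The OUT
 * direction is checked independently against its own limit.
 */
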
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval. The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling. This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP-complete problem to find the minimum worst
 * case scenario. Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval. In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in. For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so. The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit. This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval. We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval. Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15. We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain. Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
			"Available: %u percent\n",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /
			max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}

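/*
 * Stand-alone model of the interval-scheduling estimate above (an
 * illustrative user-space sketch, not driver code). The num_packets[] and
 * mps_blocks[] inputs are hypothetical, and the per-interval overhead
 * tracking of the real algorithm is collapsed into one fixed per-packet
 * overhead to keep the carry-forward mechanics visible.
 */
#include <stdio.h>

#define MAX_INTERVAL 16

static unsigned int worst_case_bw(const unsigned int num_packets[MAX_INTERVAL],
		const unsigned int mps_blocks[MAX_INTERVAL],
		unsigned int overhead_per_packet)
{
	unsigned int bw_used, remaining = 0;
	unsigned int packet_size = 0;
	unsigned int i;

	/* Interval 0 packets must all be scheduled in every microframe. */
	bw_used = num_packets[0] * (overhead_per_packet + mps_blocks[0]);

	for (i = 1; i < MAX_INTERVAL; i++) {
		unsigned int transmitted;

		/* Leftovers from the previous interval are sent twice here. */
		remaining = 2 * remaining + num_packets[i];
		if (mps_blocks[i] > packet_size)
			packet_size = mps_blocks[i];

		/* Distribute evenly over the 1 << (i + 1) opportunities. */
		transmitted = remaining >> (i + 1);
		bw_used += transmitted * (overhead_per_packet + packet_size);
		remaining = remaining % (1 << (i + 1));

		if (remaining == 0)
			packet_size = 0;
		else if (transmitted > 0)
			packet_size = mps_blocks[i];
	}
	/* Over-schedule any final leftovers into every microframe. */
	if (remaining > 0)
		bw_used += overhead_per_packet + packet_size;
	return bw_used;
}

int main(void)
{
	unsigned int pkts[MAX_INTERVAL] = { 0, 8 };
	unsigned int mps[MAX_INTERVAL] = { 0, 8 };

	/* 8 interval-1 packets spread as 8 >> 2 = 2 per opportunity,
	 * so the estimate prints 2 * (2 + 8) = 20 blocks.
	 */
	printf("worst-case blocks per microframe: %u\n",
			worst_case_bw(pkts, mps, 2));
	return 0;
}
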
static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
			ep_type != ISOC_IN_EP &&
			ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}

static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
			(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}

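/*
 * Example of the consumed-bandwidth math above (the units and constant
 * values are assumptions for illustration): a periodic SS endpoint with
 * mult = 1, num_packets = 2, ep_interval = 2, and a max packet size that
 * rounds up to mps = 2 blocks consumes
 * DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + 2 + SS_OVERHEAD_BURST), 1 << 2)
 * blocks, i.e. the full service-interval cost averaged over the four
 * microframes of the interval, rounded up.
 */
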
void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}

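/*
 * Worked example of the interval normalization above (the encoding is the
 * usual log2 microframe interval): a full-speed interrupt endpoint with an
 * 8-microframe (1-frame) period has ep_interval = 3, which normalizes to
 * table slot 3 - 3 = 0, the per-frame bucket. A high-speed endpoint with
 * ep_interval = 3 keeps slot 3, i.e. one service opportunity every
 * 8 microframes.
 */
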
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}

void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;
	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
	if (old_active_eps == 0 &&
			virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
			virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}

static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}

/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	if (command)
		in_ctx = command->in_ctx;
	else
		in_ctx = virt_dev->in_ctx;

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci_reserve_host_resources(xhci, in_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, "
				"active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	if (command) {
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;

		/* Enqueue pointer can be left pointing to the link TRB,
		 * we must handle that
		 */
		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
			command->command_trb =
				xhci->cmd_ring->enq_seg->next->trbs;

		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, in_ctx);
		else
			xhci_finish_resource_reservation(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint(). If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface. Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there are no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0)
		return 0;

	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Caller should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
				!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}

static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}

static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}

/* Deal with stalled endpoints. The core should have sent the control message
 * to clear the halt condition. However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()).
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command. Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}

static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports. Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}

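/*
 * Sizing example for the helper above (illustrative numbers): a driver that
 * asks for 4 streams arrives here with *num_streams = 5, because stream 0 is
 * reserved for the xHC, and roundup_pow_of_two(5) = 8 stream context
 * entries. If HCC_MAX_PSA() reported only 4 entries, both values would be
 * clamped to 4 and the driver would ultimately be granted 3 usable
 * stream IDs.
 */
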
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = USB_SS_MAX_STREAMS(
				eps[i]->ss_ep_comp.bmAttributes);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already.\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

/*
 * The USB device drivers use this function (through the HCD interface in the
 * USB core) to prepare a set of bulk endpoints to use streams. Streams are
 * used to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams. While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal (non-stream) ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}

/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint. We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal (non-stream) ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command. The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.\n",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

2a8f82c4
SS
3119/*
3120 * This submits a Reset Device Command, which will set the device state to 0,
3121 * set the device address to 0, and disable all the endpoints except the default
3122 * control endpoint. The USB core should come back and call
3123 * xhci_address_device(), and then re-set up the configuration. If this is
3124 * called because of a usb_reset_and_verify_device(), then the old alternate
3125 * settings will be re-installed through the normal bandwidth allocation
3126 * functions.
3127 *
3128 * Wait for the Reset Device command to finish. Remove all structures
3129 * associated with the endpoints that were disabled. Clear the input device
3130 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
f0615c45
AX
3131 *
3132 * If the virt_dev to be reset does not exist or does not match the udev,
3133 * it means the device is lost, possibly due to the xHC restore error and
3134 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3135 * re-allocate the device.
2a8f82c4 3136 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If the device is not set up, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
			SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway. Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;

	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}
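
	/*
	 * Background for the poison check above: list_del() writes
	 * LIST_POISON1 into the entry's ->next pointer, so a poisoned ->next
	 * means the command completion handler already removed this command
	 * from the list and it must not be unlinked twice. A minimal sketch
	 * of the same pattern (cmd standing in for reset_device_cmd):
	 *
	 *	if (cmd->cmd_list.next != LIST_POISON1)
	 *		list_del(&cmd->cmd_list);
	 */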

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error. May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, all traffic has been stopped, and the endpoints have been
 * disabled. Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	if (udev->usb2_hw_lpm_enabled) {
		xhci_set_usb2_hardware_lpm(hcd, udev, 0);
		udev->usb2_hw_lpm_enabled = 0;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot. XXX Can free sleep?
	 */
}

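/*
 * Note on the 0xffffffff check in xhci_free_dev() above: PCI reads from a
 * surprise-removed device return all ones, so an all-ones status register
 * means the host controller itself is gone; only the driver-side
 * bookkeeping can be freed, and no Disable Slot command is queued.
 */
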
/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.\n",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
			xhci->num_active_eps);
	return 0;
}

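/*
 * Usage note: this reservation only matters on hosts with
 * XHCI_EP_LIMIT_QUIRK set, where the controller supports fewer active
 * endpoint contexts than software might otherwise program. Callers hold
 * xhci->lock around the call so the check and the num_active_eps update
 * stay atomic, as in this sketch of the caller pattern used below:
 *
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	ret = xhci_reserve_host_control_ep_resources(xhci);
 *	spin_unlock_irqrestore(&xhci->lock, flags);
 */
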
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed. Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

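/*
 * Note on the disable_slot error path above: the Disable Slot command is
 * only queued if it fits on the current command ring segment;
 * xhci_queue_slot_control() fails rather than allocating a new segment,
 * which keeps this error path free of memory allocation.
 */
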
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		return -EINVAL;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for address "
				"device command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		return ret;
	}
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use kernel assigned address for devices; store xHC assigned
	 * address locally. */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
		+ 1;
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}

#ifdef CONFIG_USB_SUSPEND

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};

/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(int u2del, bool use_besl)
{
	int hird;

	if (use_besl) {
		for (hird = 0; hird < 16; hird++) {
			if (xhci_besl_encoding[hird] >= u2del)
				break;
		}
	} else {
		if (u2del <= 50)
			hird = 0;
		else
			hird = (u2del - 51) / 75 + 1;

		if (hird > 15)
			hird = 15;
	}

	return hird;
}

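/*
 * Worked example for xhci_calculate_hird_besl(), using the encoding table
 * above: for u2del = 400 (the U2 device exit latency from HCS_U2_LATENCY(),
 * in microseconds), the BESL path picks the first entry >= 400, which is
 * index 4 (400 us), so hird = 4. The HIRD path instead computes
 * (400 - 51) / 75 + 1 = 5, clamping any result above 15.
 */
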
static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
			struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct dev_info *dev_info;
	__le32 __iomem **port_array;
	__le32 __iomem *addr, *pm_addr;
	u32 temp, dev_id;
	unsigned int port_num;
	unsigned long flags;
	int u2del, hird;
	int ret;

	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return -EINVAL;

	/* So far we only support LPM for non-hub devices connected directly
	 * to the root hub.
	 */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EINVAL;

	spin_lock_irqsave(&xhci->lock, flags);

	/* Look for devices in lpm_failed_devs list */
	dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
			le16_to_cpu(udev->descriptor.idProduct);
	list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
		if (dev_info->dev_id == dev_id) {
			ret = -EINVAL;
			goto finish;
		}
	}

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;

	if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
		ret = -EINVAL;
		goto finish;
	}

	/*
	 * Test USB 2.0 software LPM.
	 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
	 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
	 * in the June 2011 errata release.
	 */
	xhci_dbg(xhci, "test port %d software LPM\n", port_num);
	/*
	 * Set L1 Device Slot and HIRD/BESL.
	 * Check device's USB 2.0 extension descriptor to determine whether
	 * HIRD or BESL should be used. See USB2.0 LPM errata.
	 */
	pm_addr = port_array[port_num] + 1;
	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
		hird = xhci_calculate_hird_besl(u2del, 1);
	else
		hird = xhci_calculate_hird_besl(u2del, 0);

	temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
	xhci_writel(xhci, temp, pm_addr);

	/* Set port link state to U2(L1) */
	addr = port_array[port_num];
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);

	/* wait for ACK */
	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);

	/* Check L1 Status */
	ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
	if (ret != -ETIMEDOUT) {
		/* enter L1 successfully */
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
				port_num, temp);
		ret = 0;
	} else {
		temp = xhci_readl(xhci, pm_addr);
		xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
				port_num, temp & PORT_L1S_MASK);
		ret = -EINVAL;
	}

	/* Resume the port */
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);

	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);

	/* Clear PLC */
	xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);

	/* Check PORTSC to make sure the device is in the right state */
	if (!ret) {
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
		if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
				(temp & PORT_PLS_MASK) != XDEV_U0) {
			xhci_dbg(xhci, "port L1 resume fail\n");
			ret = -EINVAL;
		}
	}

	if (ret) {
		/* Insert dev to lpm_failed_devs list */
		xhci_warn(xhci, "device LPM test failed, may disconnect and "
				"re-enumerate\n");
		dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
		if (!dev_info) {
			ret = -ENOMEM;
			goto finish;
		}
		dev_info->dev_id = dev_id;
		INIT_LIST_HEAD(&dev_info->list);
		list_add(&dev_info->list, &xhci->lpm_failed_devs);
	} else {
		xhci_ring_device(xhci, udev->slot_id);
	}

finish:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem **port_array;
	__le32 __iomem *pm_addr;
	u32 temp;
	unsigned int port_num;
	unsigned long flags;
	int u2del, hird;

	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + 1;
	temp = xhci_readl(xhci, pm_addr);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num);

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
		hird = xhci_calculate_hird_besl(u2del, 1);
	else
		hird = xhci_calculate_hird_besl(u2del, 0);

	if (enable) {
		temp &= ~PORT_HIRD_MASK;
		temp |= PORT_HIRD(hird) | PORT_RWE;
		xhci_writel(xhci, temp, pm_addr);
		temp = xhci_readl(xhci, pm_addr);
		temp |= PORT_HLE;
		xhci_writel(xhci, temp, pm_addr);
	} else {
		temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
		xhci_writel(xhci, temp, pm_addr);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

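/*
 * Note on the enable path above: HIRD and the remote wake enable bit
 * (PORT_RWE) are written to the port power management register first, and
 * only a second write sets the hardware LPM enable bit (PORT_HLE), so
 * hardware-initiated L1 never starts with stale HIRD/RWE values.
 */
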
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int ret;

	ret = xhci_usb2_software_lpm_test(hcd, udev);
	if (!ret) {
		xhci_dbg(xhci, "software LPM test succeeded\n");
		if (xhci->hw_lpm_support == 1) {
			udev->usb2_hw_lpm_capable = 1;
			ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
			if (!ret)
				udev->usb2_hw_lpm_enabled = 1;
		}
	}

	return 0;
}

#else

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

#endif /* CONFIG_USB_SUSPEND */

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

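/*
 * Worked example for the TT think time conversion above: the slot context
 * encodes think time in units of 8 full-speed bit times, and one FS bit
 * time is 1 / 12 MHz, roughly 83.3 ns, so 8 bit times come to about 666 ns.
 * A hub reporting tt->think_time = 1332 ns therefore yields
 * (1332 / 666) - 1 = 1, the "16 FS bit times" encoding.
 */
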
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
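
/*
 * Note on the shift above: MFINDEX counts 125 us microframes, and there
 * are eight microframes per 1 ms frame, so ">> 3" converts the running
 * microframe index into the frame number the USB core expects.
 */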
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;
	u32 temp;

	hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		xhci = hcd_to_xhci(hcd);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		if (HCC_64BIT_ADDR(temp)) {
			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
		} else {
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
		}
		return 0;
	}

	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	get_quirks(dev, xhci);

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	if (HCC_64BIT_ADDR(temp)) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Called HCD init\n");
	return 0;
error:
	kfree(xhci);
	return retval;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
#ifdef CONFIG_PCI
	int retval = 0;

	retval = xhci_register_pci();

	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.");
		return retval;
	}
#endif
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
#ifdef CONFIG_PCI
	xhci_unregister_pci();
#endif
}
module_exit(xhci_hcd_cleanup);