/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
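
/*
 * Example (illustrative, not part of the driver): because link_quirk is
 * declared S_IRUGO | S_IWUSR, it can be set at load time or flipped at
 * runtime through the standard module parameter sysfs file, e.g.
 *
 *	modprobe xhci-hcd link_quirk=1
 *	echo 1 > /sys/module/xhci_hcd/parameters/link_quirk
 *
 * Note the value appears to be sampled only in xhci_init() below, so a
 * runtime change takes effect on the next host initialization.
 */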

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
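
/*
 * Typical use in this file is a bounded poll of a status bit, e.g.
 *
 *	ret = handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 *
 * which waits for the HCHalted bit to latch, returning -ETIMEDOUT or
 * -ENODEV on failure (see the real call site in xhci_halt() below).
 */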

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

/*
 * Free IRQs
 * free all IRQs that have been requested
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
			0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   based on the number of interrupters in xhci HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one extra
	 *   vector to ensure an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
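
/*
 * Worked example of the vector-count clamp above (numbers are illustrative):
 * with 4 online CPUs, num_online_cpus() + 1 = 5; if HCSPARAMS1 reports 8
 * interrupters, msix_count = min(5, 8) = 5.  A host that supports only a
 * single interrupter would clamp msix_count to 1.
 */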

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	u32 ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	/* Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to msi */
		ret = xhci_setup_msi(xhci);

	if (ret) {
legacy_irq:
		/* fall back to legacy interrupt */
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
				hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	int i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 *
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (retval)
			goto failed_restart;

		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
failed_restart:
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
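
/*
 * Worked examples of the index formula above:
 *	ep 1 OUT (0x01): (1 * 2) + 0 - 1 = 1
 *	ep 1 IN  (0x81): (1 * 2) + 1 - 1 = 2
 *	ep 0 (control):  (0 * 2)         = 0
 */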

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
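
/*
 * Continuing the example above: ep 1 IN has endpoint index 2, so its context
 * flag is 1 << (2 + 1) = 0b1000, matching the added_ctxs example in the
 * comment on xhci_last_valid_endpoint() below.
 */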

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs
			|| !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
						"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev that do not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
1322 | ||
f94e0186 SS |
1323 | /* Drop an endpoint from a new bandwidth configuration for this device. |
1324 | * Only one call to this function is allowed per endpoint before | |
1325 | * check_bandwidth() or reset_bandwidth() must be called. | |
1326 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will | |
1327 | * add the endpoint to the schedule with possibly new parameters denoted by a | |
1328 | * different endpoint descriptor in usb_host_endpoint. | |
1329 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is | |
1330 | * not allowed. | |
f88ba78d SS |
1331 | * |
1332 | * The USB core will not allow URBs to be queued to an endpoint that is being | |
1333 | * disabled, so there's no need for mutual exclusion to protect | |
1334 | * the xhci->devs[slot_id] structure. | |
f94e0186 SS |
1335 | */ |
1336 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |
1337 | struct usb_host_endpoint *ep) | |
1338 | { | |
f94e0186 | 1339 | struct xhci_hcd *xhci; |
d115b048 JY |
1340 | struct xhci_container_ctx *in_ctx, *out_ctx; |
1341 | struct xhci_input_control_ctx *ctrl_ctx; | |
1342 | struct xhci_slot_ctx *slot_ctx; | |
f94e0186 SS |
1343 | unsigned int last_ctx; |
1344 | unsigned int ep_index; | |
1345 | struct xhci_ep_ctx *ep_ctx; | |
1346 | u32 drop_flag; | |
1347 | u32 new_add_flags, new_drop_flags, new_slot_info; | |
1348 | int ret; | |
1349 | ||
64927730 | 1350 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
f94e0186 SS |
1351 | if (ret <= 0) |
1352 | return ret; | |
1353 | xhci = hcd_to_xhci(hcd); | |
fe6c6c13 SS |
1354 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1355 | return -ENODEV; | |
f94e0186 | 1356 | |
fe6c6c13 | 1357 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
1358 | drop_flag = xhci_get_endpoint_flag(&ep->desc); |
1359 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { | |
1360 | xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", | |
1361 | __func__, drop_flag); | |
1362 | return 0; | |
1363 | } | |
1364 | ||
f94e0186 | 1365 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
d115b048 JY |
1366 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
1367 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | |
f94e0186 | 1368 | ep_index = xhci_get_endpoint_index(&ep->desc); |
d115b048 | 1369 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
f94e0186 SS |
1370 | /* If the HC already knows the endpoint is disabled, |
1371 | * or the HCD has noted it is disabled, ignore this request | |
1372 | */ | |
f5960b69 ME |
1373 | if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == |
1374 | cpu_to_le32(EP_STATE_DISABLED)) || | |
28ccd296 ME |
1375 | le32_to_cpu(ctrl_ctx->drop_flags) & |
1376 | xhci_get_endpoint_flag(&ep->desc)) { | |
700e2052 GKH |
1377 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
1378 | __func__, ep); | |
f94e0186 SS |
1379 | return 0; |
1380 | } | |
1381 | ||
28ccd296 ME |
1382 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
1383 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); | |
f94e0186 | 1384 | |
28ccd296 ME |
1385 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
1386 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); | |
f94e0186 | 1387 | |
28ccd296 | 1388 | last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)); |
d115b048 | 1389 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
f94e0186 | 1390 | /* Update the last valid endpoint context, if we deleted the last one */ |
28ccd296 ME |
1391 | if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) > |
1392 | LAST_CTX(last_ctx)) { | |
1393 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); | |
1394 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); | |
f94e0186 | 1395 | } |
28ccd296 | 1396 | new_slot_info = le32_to_cpu(slot_ctx->dev_info); |
f94e0186 SS |
1397 | |
1398 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); | |
1399 | ||
f94e0186 SS |
1400 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
1401 | (unsigned int) ep->desc.bEndpointAddress, | |
1402 | udev->slot_id, | |
1403 | (unsigned int) new_drop_flags, | |
1404 | (unsigned int) new_add_flags, | |
1405 | (unsigned int) new_slot_info); | |
1406 | return 0; | |
1407 | } | |
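/*
 * Illustrative sketch, not driver code: the ordering the comment above
 * requires. A caller changing an endpoint's parameters drops it, re-adds
 * it with the new descriptor, and commits the result; old_ep and new_ep
 * are hypothetical usb_host_endpoint pointers.
 *
 *	ret = xhci_drop_endpoint(hcd, udev, old_ep);
 *	if (!ret)
 *		ret = xhci_add_endpoint(hcd, udev, new_ep);
 *	if (!ret)
 *		ret = xhci_check_bandwidth(hcd, udev);
 *	if (ret)
 *		xhci_reset_bandwidth(hcd, udev);
 */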
1408 | ||
1409 | /* Add an endpoint to a new possible bandwidth configuration for this device. | |
1410 | * Only one call to this function is allowed per endpoint before | |
1411 | * check_bandwidth() or reset_bandwidth() must be called. | |
1412 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will | |
1413 | * add the endpoint to the schedule with possibly new parameters denoted by a | |
1414 | * different endpoint descriptor in usb_host_endpoint. | |
1415 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is | |
1416 | * not allowed. | |
f88ba78d SS |
1417 | * |
1418 | * The USB core will not allow URBs to be queued to an endpoint until the | |
1419 | * configuration or alt setting is installed in the device, so there's no need | |
1420 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. | |
f94e0186 SS |
1421 | */ |
1422 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |
1423 | struct usb_host_endpoint *ep) | |
1424 | { | |
f94e0186 | 1425 | struct xhci_hcd *xhci; |
d115b048 | 1426 | struct xhci_container_ctx *in_ctx, *out_ctx; |
f94e0186 SS |
1427 | unsigned int ep_index; |
1428 | struct xhci_ep_ctx *ep_ctx; | |
d115b048 JY |
1429 | struct xhci_slot_ctx *slot_ctx; |
1430 | struct xhci_input_control_ctx *ctrl_ctx; | |
f94e0186 SS |
1431 | u32 added_ctxs; |
1432 | unsigned int last_ctx; | |
1433 | u32 new_add_flags, new_drop_flags, new_slot_info; | |
fa75ac37 | 1434 | struct xhci_virt_device *virt_dev; |
f94e0186 SS |
1435 | int ret = 0; |
1436 | ||
64927730 | 1437 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
a1587d97 SS |
1438 | if (ret <= 0) { |
1439 | /* So we won't queue a reset ep command for a root hub */ | |
1440 | ep->hcpriv = NULL; | |
f94e0186 | 1441 | return ret; |
a1587d97 | 1442 | } |
f94e0186 | 1443 | xhci = hcd_to_xhci(hcd); |
fe6c6c13 SS |
1444 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1445 | return -ENODEV; | |
f94e0186 SS |
1446 | |
1447 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); | |
1448 | last_ctx = xhci_last_valid_endpoint(added_ctxs); | |
1449 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { | |
1450 | /* FIXME when we have to issue an evaluate endpoint command to | |
1451 | * deal with ep0 max packet size changing once we get the | |
1452 | * descriptors | |
1453 | */ | |
1454 | xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", | |
1455 | __func__, added_ctxs); | |
1456 | return 0; | |
1457 | } | |
1458 | ||
fa75ac37 SS |
1459 | virt_dev = xhci->devs[udev->slot_id]; |
1460 | in_ctx = virt_dev->in_ctx; | |
1461 | out_ctx = virt_dev->out_ctx; | |
d115b048 | 1462 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
f94e0186 | 1463 | ep_index = xhci_get_endpoint_index(&ep->desc); |
d115b048 | 1464 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
fa75ac37 SS |
1465 | |
1466 | /* If this endpoint is already in use, and the upper layers are trying | |
1467 | * to add it again without dropping it, reject the addition. | |
1468 | */ | |
1469 | if (virt_dev->eps[ep_index].ring && | |
1470 | !(le32_to_cpu(ctrl_ctx->drop_flags) & | |
1471 | xhci_get_endpoint_flag(&ep->desc))) { | |
1472 | xhci_warn(xhci, "Trying to add endpoint 0x%x " | |
1473 | "without dropping it.\n", | |
1474 | (unsigned int) ep->desc.bEndpointAddress); | |
1475 | return -EINVAL; | |
1476 | } | |
1477 | ||
f94e0186 SS |
1478 | /* If the HCD has already noted the endpoint is enabled, |
1479 | * ignore this request. | |
1480 | */ | |
28ccd296 ME |
1481 | if (le32_to_cpu(ctrl_ctx->add_flags) & |
1482 | xhci_get_endpoint_flag(&ep->desc)) { | |
700e2052 GKH |
1483 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
1484 | __func__, ep); | |
f94e0186 SS |
1485 | return 0; |
1486 | } | |
1487 | ||
f88ba78d SS |
1488 | /* |
1489 | * Configuration and alternate setting changes must be done in | |
1490 | * process context, not interrupt context (or so documenation | |
1491 | * for usb_set_interface() and usb_set_configuration() claim). | |
1492 | */ | |
fa75ac37 | 1493 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { |
f94e0186 SS |
1494 | dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", |
1495 | __func__, ep->desc.bEndpointAddress); | |
f94e0186 SS |
1496 | return -ENOMEM; |
1497 | } | |
1498 | ||
28ccd296 ME |
1499 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
1500 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); | |
f94e0186 SS |
1501 | |
1502 | /* If xhci_endpoint_disable() was called for this endpoint, but the | |
1503 | * xHC hasn't been notified yet through the check_bandwidth() call, | |
1504 | * this re-adds a new state for the endpoint from the new endpoint | |
1505 | * descriptors. We must drop and re-add this endpoint, so we leave the | |
1506 | * drop flags alone. | |
1507 | */ | |
28ccd296 | 1508 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
f94e0186 | 1509 | |
d115b048 | 1510 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
f94e0186 | 1511 | /* Update the last valid endpoint context, if we just added one past */ |
28ccd296 ME |
1512 | if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) < |
1513 | LAST_CTX(last_ctx)) { | |
1514 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); | |
1515 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); | |
f94e0186 | 1516 | } |
28ccd296 | 1517 | new_slot_info = le32_to_cpu(slot_ctx->dev_info); |
f94e0186 | 1518 | |
a1587d97 SS |
1519 | /* Store the usb_device pointer for later use */ |
1520 | ep->hcpriv = udev; | |
1521 | ||
f94e0186 SS |
1522 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
1523 | (unsigned int) ep->desc.bEndpointAddress, | |
1524 | udev->slot_id, | |
1525 | (unsigned int) new_drop_flags, | |
1526 | (unsigned int) new_add_flags, | |
1527 | (unsigned int) new_slot_info); | |
1528 | return 0; | |
1529 | } | |
1530 | ||
d115b048 | 1531 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
f94e0186 | 1532 | { |
d115b048 | 1533 | struct xhci_input_control_ctx *ctrl_ctx; |
f94e0186 | 1534 | struct xhci_ep_ctx *ep_ctx; |
d115b048 | 1535 | struct xhci_slot_ctx *slot_ctx; |
f94e0186 SS |
1536 | int i; |
1537 | ||
1538 | /* When a device's add flag and drop flag are zero, any subsequent | |
1539 | * configure endpoint command will leave that endpoint's state | |
1540 | * untouched. Make sure we don't leave any old state in the input | |
1541 | * endpoint contexts. | |
1542 | */ | |
d115b048 JY |
1543 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
1544 | ctrl_ctx->drop_flags = 0; | |
1545 | ctrl_ctx->add_flags = 0; | |
1546 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | |
28ccd296 | 1547 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
f94e0186 | 1548 | /* Endpoint 0 is always valid */ |
28ccd296 | 1549 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
f94e0186 | 1550 | for (i = 1; i < 31; ++i) { |
d115b048 | 1551 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
f94e0186 SS |
1552 | ep_ctx->ep_info = 0; |
1553 | ep_ctx->ep_info2 = 0; | |
8e595a5d | 1554 | ep_ctx->deq = 0; |
f94e0186 SS |
1555 | ep_ctx->tx_info = 0; |
1556 | } | |
1557 | } | |
1558 | ||
f2217e8e | 1559 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
00161f7d | 1560 | struct usb_device *udev, u32 *cmd_status) |
f2217e8e SS |
1561 | { |
1562 | int ret; | |
1563 | ||
913a8a34 | 1564 | switch (*cmd_status) { |
f2217e8e SS |
1565 | case COMP_ENOMEM: |
1566 | dev_warn(&udev->dev, "Not enough host controller resources " | |
1567 | "for new device state.\n"); | |
1568 | ret = -ENOMEM; | |
1569 | /* FIXME: can we allocate more resources for the HC? */ | |
1570 | break; | |
1571 | case COMP_BW_ERR: | |
1572 | dev_warn(&udev->dev, "Not enough bandwidth " | |
1573 | "for new device state.\n"); | |
1574 | ret = -ENOSPC; | |
1575 | /* FIXME: can we go back to the old state? */ | |
1576 | break; | |
1577 | case COMP_TRB_ERR: | |
1578 | /* the HCD set up something wrong */ | |
1579 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " | |
1580 | "add flag = 1, " | |
1581 | "and endpoint is not disabled.\n"); | |
1582 | ret = -EINVAL; | |
1583 | break; | |
f6ba6fe2 AH |
1584 | case COMP_DEV_ERR: |
1585 | dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint " | |
1586 | "configure command.\n"); | |
1587 | ret = -ENODEV; | |
1588 | break; | |
f2217e8e SS |
1589 | case COMP_SUCCESS: |
1590 | dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); | |
1591 | ret = 0; | |
1592 | break; | |
1593 | default: | |
1594 | xhci_err(xhci, "ERROR: unexpected command completion " | |
913a8a34 | 1595 | "code 0x%x.\n", *cmd_status); |
f2217e8e SS |
1596 | ret = -EINVAL; |
1597 | break; | |
1598 | } | |
1599 | return ret; | |
1600 | } | |
1601 | ||
1602 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | |
00161f7d | 1603 | struct usb_device *udev, u32 *cmd_status) |
f2217e8e SS |
1604 | { |
1605 | int ret; | |
913a8a34 | 1606 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; |
f2217e8e | 1607 | |
913a8a34 | 1608 | switch (*cmd_status) { |
f2217e8e SS |
1609 | case COMP_EINVAL: |
1610 | dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " | |
1611 | "context command.\n"); | |
1612 | ret = -EINVAL; | |
1613 | break; | |
1614 | case COMP_EBADSLT: | |
1615 | dev_warn(&udev->dev, "WARN: slot not enabled for " |
1616 | "evaluate context command.\n"); | |
1617 | case COMP_CTX_STATE: | |
1618 | dev_warn(&udev->dev, "WARN: invalid context state for " | |
1619 | "evaluate context command.\n"); | |
1620 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); | |
1621 | ret = -EINVAL; | |
1622 | break; | |
f6ba6fe2 AH |
1623 | case COMP_DEV_ERR: |
1624 | dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate " | |
1625 | "context command.\n"); | |
1626 | ret = -ENODEV; | |
1627 | break; | |
1bb73a88 AH |
1628 | case COMP_MEL_ERR: |
1629 | /* Max Exit Latency too large error */ | |
1630 | dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); | |
1631 | ret = -EINVAL; | |
1632 | break; | |
f2217e8e SS |
1633 | case COMP_SUCCESS: |
1634 | dev_dbg(&udev->dev, "Successful evaluate context command\n"); | |
1635 | ret = 0; | |
1636 | break; | |
1637 | default: | |
1638 | xhci_err(xhci, "ERROR: unexpected command completion " | |
913a8a34 | 1639 | "code 0x%x.\n", *cmd_status); |
f2217e8e SS |
1640 | ret = -EINVAL; |
1641 | break; | |
1642 | } | |
1643 | return ret; | |
1644 | } | |
1645 | ||
2cf95c18 SS |
1646 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, |
1647 | struct xhci_container_ctx *in_ctx) | |
1648 | { | |
1649 | struct xhci_input_control_ctx *ctrl_ctx; | |
1650 | u32 valid_add_flags; | |
1651 | u32 valid_drop_flags; | |
1652 | ||
1653 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | |
1654 | /* Ignore the slot flag (bit 0), and the default control endpoint flag | |
1655 | * (bit 1). The default control endpoint is added during the Address | |
1656 | * Device command and is never removed until the slot is disabled. | |
1657 | */ | |
1658 | valid_add_flags = ctrl_ctx->add_flags >> 2; | |
1659 | valid_drop_flags = ctrl_ctx->drop_flags >> 2; | |
1660 | ||
1661 | /* Use hweight32 to count the number of ones in the add flags, or | |
1662 | * number of endpoints added. Don't count endpoints that are changed | |
1663 | * (both added and dropped). | |
1664 | */ | |
1665 | return hweight32(valid_add_flags) - | |
1666 | hweight32(valid_add_flags & valid_drop_flags); | |
1667 | } | |
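/*
 * Worked example with hypothetical flag values: add_flags = 0x1c (three
 * endpoints added) and drop_flags = 0x0c (two of them also dropped, i.e.
 * merely changed) give, after the shift:
 *
 *	valid_add_flags = 0x7, valid_drop_flags = 0x3
 *	hweight32(0x7) - hweight32(0x7 & 0x3) = 3 - 2 = 1
 *
 * so only one genuinely new endpoint context needs to be reserved.
 */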
1668 | ||
1669 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, | |
1670 | struct xhci_container_ctx *in_ctx) | |
1671 | { | |
1672 | struct xhci_input_control_ctx *ctrl_ctx; | |
1673 | u32 valid_add_flags; | |
1674 | u32 valid_drop_flags; | |
1675 | ||
1676 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | |
1677 | valid_add_flags = ctrl_ctx->add_flags >> 2; | |
1678 | valid_drop_flags = ctrl_ctx->drop_flags >> 2; | |
1679 | ||
1680 | return hweight32(valid_drop_flags) - | |
1681 | hweight32(valid_add_flags & valid_drop_flags); | |
1682 | } | |
1683 | ||
1684 | /* | |
1685 | * We need to reserve the new number of endpoints before the configure endpoint | |
1686 | * command completes. We can't subtract the dropped endpoints from the number | |
1687 | * of active endpoints until the command completes because we can oversubscribe | |
1688 | * the host in this case: | |
1689 | * | |
1690 | * - the first configure endpoint command drops more endpoints than it adds | |
1691 | * - a second configure endpoint command that adds more endpoints is queued | |
1692 | * - the first configure endpoint command fails, so the config is unchanged | |
1693 | * - the second command may succeed even though there aren't enough resources |
1694 | * | |
1695 | * Must be called with xhci->lock held. | |
1696 | */ | |
1697 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, | |
1698 | struct xhci_container_ctx *in_ctx) | |
1699 | { | |
1700 | u32 added_eps; | |
1701 | ||
1702 | added_eps = xhci_count_num_new_endpoints(xhci, in_ctx); | |
1703 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { | |
1704 | xhci_dbg(xhci, "Not enough ep ctxs: " | |
1705 | "%u active, need to add %u, limit is %u.\n", | |
1706 | xhci->num_active_eps, added_eps, | |
1707 | xhci->limit_active_eps); | |
1708 | return -ENOMEM; | |
1709 | } | |
1710 | xhci->num_active_eps += added_eps; | |
1711 | xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, | |
1712 | xhci->num_active_eps); | |
1713 | return 0; | |
1714 | } | |
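/*
 * Worked example of the hazard described above, with hypothetical numbers
 * limit_active_eps = 8 and num_active_eps = 6:
 *
 *	cmd A drops 4 eps: num_active_eps stays 6 (drops are deferred)
 *	cmd B adds 5 eps:  6 + 5 > 8, so B is refused here
 *
 * If the drops were subtracted immediately, B would be accepted against
 * 2 + 5 = 7 contexts; a failure of A would then leave the host
 * oversubscribed with 6 + 5 = 11 endpoint contexts in use.
 */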
1715 | ||
1716 | /* | |
1717 | * The configure endpoint command was failed by the xHC for some other reason, |
1718 | * so we need to revert the resources that the failed configuration would have used. |
1719 | * | |
1720 | * Must be called with xhci->lock held. | |
1721 | */ | |
1722 | static void xhci_free_host_resources(struct xhci_hcd *xhci, | |
1723 | struct xhci_container_ctx *in_ctx) | |
1724 | { | |
1725 | u32 num_failed_eps; | |
1726 | ||
1727 | num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx); | |
1728 | xhci->num_active_eps -= num_failed_eps; | |
1729 | xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", | |
1730 | num_failed_eps, | |
1731 | xhci->num_active_eps); | |
1732 | } | |
1733 | ||
1734 | /* | |
1735 | * Now that the command has completed, clean up the active endpoint count by | |
1736 | * subtracting out the endpoints that were dropped (but not changed). | |
1737 | * | |
1738 | * Must be called with xhci->lock held. | |
1739 | */ | |
1740 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, | |
1741 | struct xhci_container_ctx *in_ctx) | |
1742 | { | |
1743 | u32 num_dropped_eps; | |
1744 | ||
1745 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx); | |
1746 | xhci->num_active_eps -= num_dropped_eps; | |
1747 | if (num_dropped_eps) | |
1748 | xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", | |
1749 | num_dropped_eps, | |
1750 | xhci->num_active_eps); | |
1751 | } | |
1752 | ||
c29eea62 SS |
1753 | unsigned int xhci_get_block_size(struct usb_device *udev) |
1754 | { | |
1755 | switch (udev->speed) { | |
1756 | case USB_SPEED_LOW: | |
1757 | case USB_SPEED_FULL: | |
1758 | return FS_BLOCK; | |
1759 | case USB_SPEED_HIGH: | |
1760 | return HS_BLOCK; | |
1761 | case USB_SPEED_SUPER: | |
1762 | return SS_BLOCK; | |
1763 | case USB_SPEED_UNKNOWN: | |
1764 | case USB_SPEED_WIRELESS: | |
1765 | default: | |
1766 | /* Should never happen */ | |
1767 | return 1; | |
1768 | } | |
1769 | } | |
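/*
 * Illustrative conversion using the block sizes above: a high-speed
 * endpoint with a 1024-byte max packet size costs
 * DIV_ROUND_UP(1024, HS_BLOCK) blocks per packet; assuming the usual
 * HS_BLOCK of 4 bytes, that is 256 bandwidth blocks.
 */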
1770 | ||
1771 | unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) | |
1772 | { | |
1773 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) | |
1774 | return LS_OVERHEAD; | |
1775 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) | |
1776 | return FS_OVERHEAD; | |
1777 | return HS_OVERHEAD; | |
1778 | } | |
1779 | ||
1780 | /* If we are changing a LS/FS device under a HS hub, | |
1781 | * make sure (if we are activating a new TT) that the HS bus has enough | |
1782 | * bandwidth for this new TT. | |
1783 | */ | |
1784 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, | |
1785 | struct xhci_virt_device *virt_dev, | |
1786 | int old_active_eps) | |
1787 | { | |
1788 | struct xhci_interval_bw_table *bw_table; | |
1789 | struct xhci_tt_bw_info *tt_info; | |
1790 | ||
1791 | /* Find the bandwidth table for the root port this TT is attached to. */ | |
1792 | bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; | |
1793 | tt_info = virt_dev->tt_info; | |
1794 | /* If this TT already had active endpoints, the bandwidth for this TT | |
1795 | * has already been added. Removing all periodic endpoints (and thus | |
1796 | * making the TT inactive) will only decrease the bandwidth used. |
1797 | */ | |
1798 | if (old_active_eps) | |
1799 | return 0; | |
1800 | if (old_active_eps == 0 && tt_info->active_eps != 0) { | |
1801 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) | |
1802 | return -ENOMEM; | |
1803 | return 0; | |
1804 | } | |
1805 | /* Not sure why we would have no new active endpoints... | |
1806 | * | |
1807 | * Maybe because of an Evaluate Context change for a hub update or a | |
1808 | * control endpoint 0 max packet size change? | |
1809 | * FIXME: skip the bandwidth calculation in that case. | |
1810 | */ | |
1811 | return 0; | |
1812 | } | |
1813 | ||
1814 | /* | |
1815 | * This algorithm is a very conservative estimate of the worst-case scheduling | |
1816 | * scenario for any one interval. The hardware dynamically schedules the | |
1817 | * packets, so we can't tell which microframe could be the limiting factor in | |
1818 | * the bandwidth scheduling. This only takes into account periodic endpoints. | |
1819 | * | |
1820 | * Obviously, we can't solve an NP-complete problem to find the minimum worst |
1821 | * case scenario. Instead, we come up with an estimate that is no less than | |
1822 | * the worst case bandwidth used for any one microframe, but may be an | |
1823 | * over-estimate. | |
1824 | * | |
1825 | * We walk the requirements for each endpoint by interval, starting with the | |
1826 | * smallest interval, and place packets in the schedule where there is only one | |
1827 | * possible way to schedule packets for that interval. In order to simplify | |
1828 | * this algorithm, we record the largest max packet size for each interval, and | |
1829 | * assume all packets will be that size. | |
1830 | * | |
1831 | * For interval 0, we obviously must schedule all packets in every microframe. |
1832 | * The bandwidth for interval 0 is just the amount of data to be transmitted | |
1833 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times | |
1834 | * the number of packets). | |
1835 | * | |
1836 | * For interval 1, we have two possible microframes to schedule those packets | |
1837 | * in. For this algorithm, if we can schedule the same number of packets for | |
1838 | * each possible scheduling opportunity (each microframe), we will do so. The | |
1839 | * remaining number of packets will be saved to be transmitted in the gaps in | |
1840 | * the next interval's scheduling sequence. | |
1841 | * | |
1842 | * As we move those remaining packets to be scheduled with interval 2 packets, | |
1843 | * we have to double the number of remaining packets to transmit. This is | |
1844 | * because the intervals are actually powers of 2, and we would be transmitting | |
1845 | * the previous interval's packets twice in this interval. We also have to be | |
1846 | * sure that when we look at the largest max packet size for this interval, we | |
1847 | * also look at the largest max packet size for the remaining packets and take | |
1848 | * the greater of the two. | |
1849 | * | |
1850 | * The algorithm continues to evenly distribute packets in each scheduling | |
1851 | * opportunity, and push the remaining packets out, until we get to the last | |
1852 | * interval. Then those packets and their associated overhead are just added | |
1853 | * to the bandwidth used. | |
2e27980e SS |
1854 | */ |
1855 | static int xhci_check_bw_table(struct xhci_hcd *xhci, | |
1856 | struct xhci_virt_device *virt_dev, | |
1857 | int old_active_eps) | |
1858 | { | |
c29eea62 SS |
1859 | unsigned int bw_reserved; |
1860 | unsigned int max_bandwidth; | |
1861 | unsigned int bw_used; | |
1862 | unsigned int block_size; | |
1863 | struct xhci_interval_bw_table *bw_table; | |
1864 | unsigned int packet_size = 0; | |
1865 | unsigned int overhead = 0; | |
1866 | unsigned int packets_transmitted = 0; | |
1867 | unsigned int packets_remaining = 0; | |
1868 | unsigned int i; | |
1869 | ||
1870 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { | |
1871 | max_bandwidth = HS_BW_LIMIT; | |
1872 | /* Convert percent of bus BW reserved to blocks reserved */ | |
1873 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); | |
1874 | } else { | |
1875 | max_bandwidth = FS_BW_LIMIT; | |
1876 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); | |
1877 | } | |
1878 | ||
1879 | bw_table = virt_dev->bw_table; | |
1880 | /* We need to translate the max packet size and max ESIT payloads into | |
1881 | * the units the hardware uses. | |
1882 | */ | |
1883 | block_size = xhci_get_block_size(virt_dev->udev); | |
1884 | ||
1885 | /* If we are manipulating a LS/FS device under a HS hub, double check | |
1886 | * that the HS bus has enough bandwidth if we are activating a new TT. |
1887 | */ | |
1888 | if (virt_dev->tt_info) { | |
1889 | xhci_dbg(xhci, "Recalculating BW for rootport %u\n", | |
1890 | virt_dev->real_port); | |
1891 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { | |
1892 | xhci_warn(xhci, "Not enough bandwidth on HS bus for " | |
1893 | "newly activated TT.\n"); | |
1894 | return -ENOMEM; | |
1895 | } | |
1896 | xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n", | |
1897 | virt_dev->tt_info->slot_id, | |
1898 | virt_dev->tt_info->ttport); | |
1899 | } else { | |
1900 | xhci_dbg(xhci, "Recalculating BW for rootport %u\n", | |
1901 | virt_dev->real_port); | |
1902 | } | |
1903 | ||
1904 | /* Add in how much bandwidth will be used for interval zero, or the | |
1905 | * rounded max ESIT payload + number of packets * largest overhead. | |
1906 | */ | |
1907 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + | |
1908 | bw_table->interval_bw[0].num_packets * | |
1909 | xhci_get_largest_overhead(&bw_table->interval_bw[0]); | |
1910 | ||
1911 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { | |
1912 | unsigned int bw_added; | |
1913 | unsigned int largest_mps; | |
1914 | unsigned int interval_overhead; | |
1915 | ||
1916 | /* | |
1917 | * How many packets could we transmit in this interval? | |
1918 | * If packets didn't fit in the previous interval, we will need | |
1919 | * to transmit that many packets twice within this interval. | |
1920 | */ | |
1921 | packets_remaining = 2 * packets_remaining + | |
1922 | bw_table->interval_bw[i].num_packets; | |
1923 | ||
1924 | /* Find the largest max packet size of this or the previous | |
1925 | * interval. | |
1926 | */ | |
1927 | if (list_empty(&bw_table->interval_bw[i].endpoints)) | |
1928 | largest_mps = 0; | |
1929 | else { | |
1930 | struct xhci_virt_ep *virt_ep; | |
1931 | struct list_head *ep_entry; | |
1932 | ||
1933 | ep_entry = bw_table->interval_bw[i].endpoints.next; | |
1934 | virt_ep = list_entry(ep_entry, | |
1935 | struct xhci_virt_ep, bw_endpoint_list); | |
1936 | /* Convert to blocks, rounding up */ | |
1937 | largest_mps = DIV_ROUND_UP( | |
1938 | virt_ep->bw_info.max_packet_size, | |
1939 | block_size); | |
1940 | } | |
1941 | if (largest_mps > packet_size) | |
1942 | packet_size = largest_mps; | |
1943 | ||
1944 | /* Use the larger overhead of this or the previous interval. */ | |
1945 | interval_overhead = xhci_get_largest_overhead( | |
1946 | &bw_table->interval_bw[i]); | |
1947 | if (interval_overhead > overhead) | |
1948 | overhead = interval_overhead; | |
1949 | ||
1950 | /* How many packets can we evenly distribute across | |
1951 | * (1 << (i + 1)) possible scheduling opportunities? | |
1952 | */ | |
1953 | packets_transmitted = packets_remaining >> (i + 1); | |
1954 | ||
1955 | /* Add in the bandwidth used for those scheduled packets */ | |
1956 | bw_added = packets_transmitted * (overhead + packet_size); | |
1957 | ||
1958 | /* How many packets do we have remaining to transmit? */ | |
1959 | packets_remaining = packets_remaining % (1 << (i + 1)); | |
1960 | ||
1961 | /* What largest max packet size should those packets have? */ | |
1962 | /* If we've transmitted all packets, don't carry over the | |
1963 | * largest packet size. | |
1964 | */ | |
1965 | if (packets_remaining == 0) { | |
1966 | packet_size = 0; | |
1967 | overhead = 0; | |
1968 | } else if (packets_transmitted > 0) { | |
1969 | /* Otherwise if we do have remaining packets, and we've | |
1970 | * scheduled some packets in this interval, take the | |
1971 | * largest max packet size from endpoints with this | |
1972 | * interval. | |
1973 | */ | |
1974 | packet_size = largest_mps; | |
1975 | overhead = interval_overhead; | |
1976 | } | |
1977 | /* Otherwise carry over packet_size and overhead from the last | |
1978 | * time we had a remainder. | |
1979 | */ | |
1980 | bw_used += bw_added; | |
1981 | if (bw_used > max_bandwidth) { | |
1982 | xhci_warn(xhci, "Not enough bandwidth. " | |
1983 | "Proposed: %u, Max: %u\n", | |
1984 | bw_used, max_bandwidth); | |
1985 | return -ENOMEM; | |
1986 | } | |
1987 | } | |
1988 | /* | |
1989 | * Ok, we know we have some packets left over after even-handedly | |
1990 | * scheduling interval 15. We don't know which microframes they will | |
1991 | * fit into, so we over-schedule and say they will be scheduled every | |
1992 | * microframe. | |
1993 | */ | |
1994 | if (packets_remaining > 0) | |
1995 | bw_used += overhead + packet_size; | |
1996 | ||
1997 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { | |
1998 | unsigned int port_index = virt_dev->real_port - 1; | |
1999 | ||
2000 | /* OK, we're manipulating a HS device attached to a | |
2001 | * root port bandwidth domain. Include the number of active TTs | |
2002 | * in the bandwidth used. | |
2003 | */ | |
2004 | bw_used += TT_HS_OVERHEAD * | |
2005 | xhci->rh_bw[port_index].num_active_tts; | |
2006 | } | |
2007 | ||
2008 | xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " | |
2009 | "Available: %u " "percent\n", | |
2010 | bw_used, max_bandwidth, bw_reserved, | |
2011 | (max_bandwidth - bw_used - bw_reserved) * 100 / | |
2012 | max_bandwidth); | |
2013 | ||
2014 | bw_used += bw_reserved; | |
2015 | if (bw_used > max_bandwidth) { | |
2016 | xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", | |
2017 | bw_used, max_bandwidth); | |
2018 | return -ENOMEM; | |
2019 | } | |
2020 | ||
2021 | bw_table->bw_used = bw_used; | |
2e27980e SS |
2022 | return 0; |
2023 | } | |
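/*
 * Worked example of the interval walk above, with hypothetical packet
 * counts (overhead and packet size folded into one block each):
 * interval 1 holds 3 packets, interval 2 holds 5.
 *
 *	i = 1: packets_remaining = 3;  3 >> 2 = 0 scheduled, 3 % 4 = 3 carry
 *	i = 2: packets_remaining = 2 * 3 + 5 = 11;  11 >> 3 = 1 scheduled
 *	       in each of the 8 slots, 11 % 8 = 3 carry into interval 3
 *
 * The doubling reflects that interval-1 packets must be sent twice
 * within one interval-2 period.
 */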
2024 | ||
2025 | static bool xhci_is_async_ep(unsigned int ep_type) | |
2026 | { | |
2027 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && | |
2028 | ep_type != ISOC_IN_EP && | |
2029 | ep_type != INT_IN_EP); | |
2030 | } | |
2031 | ||
2032 | void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, | |
2033 | struct xhci_bw_info *ep_bw, | |
2034 | struct xhci_interval_bw_table *bw_table, | |
2035 | struct usb_device *udev, | |
2036 | struct xhci_virt_ep *virt_ep, | |
2037 | struct xhci_tt_bw_info *tt_info) | |
2038 | { | |
2039 | struct xhci_interval_bw *interval_bw; | |
2040 | int normalized_interval; | |
2041 | ||
2042 | if (xhci_is_async_ep(ep_bw->type) || | |
2043 | list_empty(&virt_ep->bw_endpoint_list)) | |
2044 | return; | |
2045 | ||
2046 | /* For LS/FS devices, we need to translate the interval expressed in | |
2047 | * microframes to frames. | |
2048 | */ | |
2049 | if (udev->speed == USB_SPEED_HIGH) | |
2050 | normalized_interval = ep_bw->ep_interval; | |
2051 | else | |
2052 | normalized_interval = ep_bw->ep_interval - 3; | |
2053 | ||
2054 | if (normalized_interval == 0) | |
2055 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; | |
2056 | interval_bw = &bw_table->interval_bw[normalized_interval]; | |
2057 | interval_bw->num_packets -= ep_bw->num_packets; | |
2058 | switch (udev->speed) { | |
2059 | case USB_SPEED_LOW: | |
2060 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; | |
2061 | break; | |
2062 | case USB_SPEED_FULL: | |
2063 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; | |
2064 | break; | |
2065 | case USB_SPEED_HIGH: | |
2066 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; | |
2067 | break; | |
2068 | case USB_SPEED_SUPER: | |
2069 | case USB_SPEED_UNKNOWN: | |
2070 | case USB_SPEED_WIRELESS: | |
2071 | /* Should never happen because only LS/FS/HS endpoints will get | |
2072 | * added to the endpoint list. | |
2073 | */ | |
2074 | return; | |
2075 | } | |
2076 | if (tt_info) | |
2077 | tt_info->active_eps -= 1; | |
2078 | list_del_init(&virt_ep->bw_endpoint_list); | |
2079 | } | |
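/*
 * Illustrative note on the normalization above: ep_interval is a power
 * of two expressed in microframes, and one frame is 2^3 = 8 microframes.
 * A full-speed interrupt endpoint polled once per frame therefore has
 * ep_interval = 3 and normalized_interval = 3 - 3 = 0, so its max ESIT
 * payload is charged to the interval-0 accounting.
 */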
2080 | ||
2081 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, | |
2082 | struct xhci_bw_info *ep_bw, | |
2083 | struct xhci_interval_bw_table *bw_table, | |
2084 | struct usb_device *udev, | |
2085 | struct xhci_virt_ep *virt_ep, | |
2086 | struct xhci_tt_bw_info *tt_info) | |
2087 | { | |
2088 | struct xhci_interval_bw *interval_bw; | |
2089 | struct xhci_virt_ep *smaller_ep; | |
2090 | int normalized_interval; | |
2091 | ||
2092 | if (xhci_is_async_ep(ep_bw->type)) | |
2093 | return; | |
2094 | ||
2095 | /* For LS/FS devices, we need to translate the interval expressed in | |
2096 | * microframes to frames. | |
2097 | */ | |
2098 | if (udev->speed == USB_SPEED_HIGH) | |
2099 | normalized_interval = ep_bw->ep_interval; | |
2100 | else | |
2101 | normalized_interval = ep_bw->ep_interval - 3; | |
2102 | ||
2103 | if (normalized_interval == 0) | |
2104 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; | |
2105 | interval_bw = &bw_table->interval_bw[normalized_interval]; | |
2106 | interval_bw->num_packets += ep_bw->num_packets; | |
2107 | switch (udev->speed) { | |
2108 | case USB_SPEED_LOW: | |
2109 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; | |
2110 | break; | |
2111 | case USB_SPEED_FULL: | |
2112 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; | |
2113 | break; | |
2114 | case USB_SPEED_HIGH: | |
2115 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; | |
2116 | break; | |
2117 | case USB_SPEED_SUPER: | |
2118 | case USB_SPEED_UNKNOWN: | |
2119 | case USB_SPEED_WIRELESS: | |
2120 | /* Should never happen because only LS/FS/HS endpoints will get | |
2121 | * added to the endpoint list. | |
2122 | */ | |
2123 | return; | |
2124 | } | |
2125 | ||
2126 | if (tt_info) | |
2127 | tt_info->active_eps += 1; | |
2128 | /* Insert the endpoint into the list, largest max packet size first. */ | |
2129 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, | |
2130 | bw_endpoint_list) { | |
2131 | if (ep_bw->max_packet_size >= | |
2132 | smaller_ep->bw_info.max_packet_size) { | |
2133 | /* Add the new ep before the smaller endpoint */ | |
2134 | list_add_tail(&virt_ep->bw_endpoint_list, | |
2135 | &smaller_ep->bw_endpoint_list); | |
2136 | return; | |
2137 | } | |
2138 | } | |
2139 | /* Add the new endpoint at the end of the list. */ | |
2140 | list_add_tail(&virt_ep->bw_endpoint_list, | |
2141 | &interval_bw->endpoints); | |
2142 | } | |
2143 | ||
2144 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, | |
2145 | struct xhci_virt_device *virt_dev, | |
2146 | int old_active_eps) | |
2147 | { | |
2148 | struct xhci_root_port_bw_info *rh_bw_info; | |
2149 | if (!virt_dev->tt_info) | |
2150 | return; | |
2151 | ||
2152 | rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; | |
2153 | if (old_active_eps == 0 && | |
2154 | virt_dev->tt_info->active_eps != 0) { | |
2155 | rh_bw_info->num_active_tts += 1; | |
c29eea62 | 2156 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; |
2e27980e SS |
2157 | } else if (old_active_eps != 0 && |
2158 | virt_dev->tt_info->active_eps == 0) { | |
2159 | rh_bw_info->num_active_tts -= 1; | |
c29eea62 | 2160 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; |
2e27980e SS |
2161 | } |
2162 | } | |
2163 | ||
2164 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, | |
2165 | struct xhci_virt_device *virt_dev, | |
2166 | struct xhci_container_ctx *in_ctx) | |
2167 | { | |
2168 | struct xhci_bw_info ep_bw_info[31]; | |
2169 | int i; | |
2170 | struct xhci_input_control_ctx *ctrl_ctx; | |
2171 | int old_active_eps = 0; | |
2172 | ||
2173 | if (virt_dev->udev->speed == USB_SPEED_SUPER) | |
2174 | return 0; | |
2175 | ||
2176 | if (virt_dev->tt_info) | |
2177 | old_active_eps = virt_dev->tt_info->active_eps; | |
2178 | ||
2179 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | |
2180 | ||
2181 | for (i = 0; i < 31; i++) { | |
2182 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) | |
2183 | continue; | |
2184 | ||
2185 | /* Make a copy of the BW info in case we need to revert this */ | |
2186 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, | |
2187 | sizeof(ep_bw_info[i])); | |
2188 | /* Drop the endpoint from the interval table if the endpoint is | |
2189 | * being dropped or changed. | |
2190 | */ | |
2191 | if (EP_IS_DROPPED(ctrl_ctx, i)) | |
2192 | xhci_drop_ep_from_interval_table(xhci, | |
2193 | &virt_dev->eps[i].bw_info, | |
2194 | virt_dev->bw_table, | |
2195 | virt_dev->udev, | |
2196 | &virt_dev->eps[i], | |
2197 | virt_dev->tt_info); | |
2198 | } | |
2199 | /* Overwrite the information stored in the endpoints' bw_info */ | |
2200 | xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); | |
2201 | for (i = 0; i < 31; i++) { | |
2202 | /* Add any changed or added endpoints to the interval table */ | |
2203 | if (EP_IS_ADDED(ctrl_ctx, i)) | |
2204 | xhci_add_ep_to_interval_table(xhci, | |
2205 | &virt_dev->eps[i].bw_info, | |
2206 | virt_dev->bw_table, | |
2207 | virt_dev->udev, | |
2208 | &virt_dev->eps[i], | |
2209 | virt_dev->tt_info); | |
2210 | } | |
2211 | ||
2212 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { | |
2213 | /* Ok, this fits in the bandwidth we have. | |
2214 | * Update the number of active TTs. | |
2215 | */ | |
2216 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); | |
2217 | return 0; | |
2218 | } | |
2219 | ||
2220 | /* We don't have enough bandwidth for this, revert the stored info. */ | |
2221 | for (i = 0; i < 31; i++) { | |
2222 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) | |
2223 | continue; | |
2224 | ||
2225 | /* Drop the new copies of any added or changed endpoints from | |
2226 | * the interval table. | |
2227 | */ | |
2228 | if (EP_IS_ADDED(ctrl_ctx, i)) { | |
2229 | xhci_drop_ep_from_interval_table(xhci, | |
2230 | &virt_dev->eps[i].bw_info, | |
2231 | virt_dev->bw_table, | |
2232 | virt_dev->udev, | |
2233 | &virt_dev->eps[i], | |
2234 | virt_dev->tt_info); | |
2235 | } | |
2236 | /* Revert the endpoint back to its old information */ | |
2237 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], | |
2238 | sizeof(ep_bw_info[i])); | |
2239 | /* Add any changed or dropped endpoints back into the table */ | |
2240 | if (EP_IS_DROPPED(ctrl_ctx, i)) | |
2241 | xhci_add_ep_to_interval_table(xhci, | |
2242 | &virt_dev->eps[i].bw_info, | |
2243 | virt_dev->bw_table, | |
2244 | virt_dev->udev, | |
2245 | &virt_dev->eps[i], | |
2246 | virt_dev->tt_info); | |
2247 | } | |
2248 | return -ENOMEM; | |
2249 | } | |
2250 | ||
2251 | ||
f2217e8e SS |
2252 | /* Issue a configure endpoint command or evaluate context command |
2253 | * and wait for it to finish. | |
2254 | */ | |
2255 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |
913a8a34 SS |
2256 | struct usb_device *udev, |
2257 | struct xhci_command *command, | |
2258 | bool ctx_change, bool must_succeed) | |
f2217e8e SS |
2259 | { |
2260 | int ret; | |
2261 | int timeleft; | |
2262 | unsigned long flags; | |
913a8a34 SS |
2263 | struct xhci_container_ctx *in_ctx; |
2264 | struct completion *cmd_completion; | |
28ccd296 | 2265 | u32 *cmd_status; |
913a8a34 | 2266 | struct xhci_virt_device *virt_dev; |
f2217e8e SS |
2267 | |
2268 | spin_lock_irqsave(&xhci->lock, flags); | |
913a8a34 | 2269 | virt_dev = xhci->devs[udev->slot_id]; |
750645f8 SS |
2270 | |
2271 | if (command) | |
913a8a34 | 2272 | in_ctx = command->in_ctx; |
750645f8 SS |
2273 | else |
2274 | in_ctx = virt_dev->in_ctx; | |
2cf95c18 | 2275 | |
750645f8 SS |
2276 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
2277 | xhci_reserve_host_resources(xhci, in_ctx)) { | |
2278 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2279 | xhci_warn(xhci, "Not enough host resources, " | |
2280 | "active endpoint contexts = %u\n", | |
2281 | xhci->num_active_eps); | |
2282 | return -ENOMEM; | |
2283 | } | |
2e27980e SS |
2284 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && |
2285 | xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) { | |
2286 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) | |
2287 | xhci_free_host_resources(xhci, in_ctx); | |
2288 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2289 | xhci_warn(xhci, "Not enough bandwidth\n"); | |
2290 | return -ENOMEM; | |
2291 | } | |
750645f8 SS |
2292 | |
2293 | if (command) { | |
913a8a34 SS |
2294 | cmd_completion = command->completion; |
2295 | cmd_status = &command->status; | |
2296 | command->command_trb = xhci->cmd_ring->enqueue; | |
7a3783ef PZ |
2297 | |
2298 | /* Enqueue pointer can be left pointing to the link TRB, | |
2299 | * we must handle that | |
2300 | */ | |
f5960b69 | 2301 | if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) |
7a3783ef PZ |
2302 | command->command_trb = |
2303 | xhci->cmd_ring->enq_seg->next->trbs; | |
2304 | ||
913a8a34 SS |
2305 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); |
2306 | } else { | |
913a8a34 SS |
2307 | cmd_completion = &virt_dev->cmd_completion; |
2308 | cmd_status = &virt_dev->cmd_status; | |
2309 | } | |
1d68064a | 2310 | init_completion(cmd_completion); |
913a8a34 | 2311 | |
f2217e8e | 2312 | if (!ctx_change) |
913a8a34 SS |
2313 | ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, |
2314 | udev->slot_id, must_succeed); | |
f2217e8e | 2315 | else |
913a8a34 | 2316 | ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, |
f2217e8e SS |
2317 | udev->slot_id); |
2318 | if (ret < 0) { | |
c01591bd SS |
2319 | if (command) |
2320 | list_del(&command->cmd_list); | |
2cf95c18 SS |
2321 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
2322 | xhci_free_host_resources(xhci, in_ctx); | |
f2217e8e SS |
2323 | spin_unlock_irqrestore(&xhci->lock, flags); |
2324 | xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); | |
2325 | return -ENOMEM; | |
2326 | } | |
2327 | xhci_ring_cmd_db(xhci); | |
2328 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2329 | ||
2330 | /* Wait for the configure endpoint command to complete */ | |
2331 | timeleft = wait_for_completion_interruptible_timeout( | |
913a8a34 | 2332 | cmd_completion, |
f2217e8e SS |
2333 | USB_CTRL_SET_TIMEOUT); |
2334 | if (timeleft <= 0) { | |
2335 | xhci_warn(xhci, "%s while waiting for %s command\n", | |
2336 | timeleft == 0 ? "Timeout" : "Signal", | |
2337 | ctx_change == 0 ? | |
2338 | "configure endpoint" : | |
2339 | "evaluate context"); | |
2340 | /* FIXME cancel the configure endpoint command */ | |
2341 | return -ETIME; | |
2342 | } | |
2343 | ||
2344 | if (!ctx_change) | |
2cf95c18 SS |
2345 | ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); |
2346 | else | |
2347 | ret = xhci_evaluate_context_result(xhci, udev, cmd_status); | |
2348 | ||
2349 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | |
2350 | spin_lock_irqsave(&xhci->lock, flags); | |
2351 | /* If the command failed, remove the reserved resources. | |
2352 | * Otherwise, clean up the estimate to include dropped eps. | |
2353 | */ | |
2354 | if (ret) | |
2355 | xhci_free_host_resources(xhci, in_ctx); | |
2356 | else | |
2357 | xhci_finish_resource_reservation(xhci, in_ctx); | |
2358 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2359 | } | |
2360 | return ret; | |
f2217e8e SS |
2361 | } |
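/*
 * Minimal usage sketch, not driver code: xhci_check_bandwidth() below
 * drives this helper without a private command structure, so the
 * virt_dev's own completion and status fields are used:
 *
 *	ret = xhci_configure_endpoint(xhci, udev, NULL, false, false);
 *
 * Passing ctx_change = true would issue an Evaluate Context command
 * instead of Configure Endpoint.
 */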
2362 | ||
f88ba78d SS |
2363 | /* Called after one or more calls to xhci_add_endpoint() or |
2364 | * xhci_drop_endpoint(). If this call fails, the USB core is expected | |
2365 | * to call xhci_reset_bandwidth(). | |
2366 | * | |
2367 | * Since we are in the middle of changing either configuration or | |
2368 | * installing a new alt setting, the USB core won't allow URBs to be | |
2369 | * enqueued for any endpoint on the old config or interface. Nothing | |
2370 | * else should be touching the xhci->devs[slot_id] structure, so we | |
2371 | * don't need to take the xhci->lock for manipulating that. | |
2372 | */ | |
f94e0186 SS |
2373 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
2374 | { | |
2375 | int i; | |
2376 | int ret = 0; | |
f94e0186 SS |
2377 | struct xhci_hcd *xhci; |
2378 | struct xhci_virt_device *virt_dev; | |
d115b048 JY |
2379 | struct xhci_input_control_ctx *ctrl_ctx; |
2380 | struct xhci_slot_ctx *slot_ctx; | |
f94e0186 | 2381 | |
64927730 | 2382 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
f94e0186 SS |
2383 | if (ret <= 0) |
2384 | return ret; | |
2385 | xhci = hcd_to_xhci(hcd); | |
fe6c6c13 SS |
2386 | if (xhci->xhc_state & XHCI_STATE_DYING) |
2387 | return -ENODEV; | |
f94e0186 | 2388 | |
700e2052 | 2389 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
2390 | virt_dev = xhci->devs[udev->slot_id]; |
2391 | ||
2392 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ | |
d115b048 | 2393 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
28ccd296 ME |
2394 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
2395 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); | |
2396 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); | |
2dc37539 SS |
2397 | |
2398 | /* Don't issue the command if there's no endpoints to update. */ | |
2399 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && | |
2400 | ctrl_ctx->drop_flags == 0) | |
2401 | return 0; | |
2402 | ||
f94e0186 | 2403 | xhci_dbg(xhci, "New Input Control Context:\n"); |
d115b048 JY |
2404 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
2405 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, | |
28ccd296 | 2406 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
f94e0186 | 2407 | |
913a8a34 SS |
2408 | ret = xhci_configure_endpoint(xhci, udev, NULL, |
2409 | false, false); | |
f94e0186 SS |
2410 | if (ret) { |
2411 | /* Caller should call reset_bandwidth() */ |
f94e0186 SS |
2412 | return ret; |
2413 | } | |
2414 | ||
2415 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); | |
d115b048 | 2416 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
28ccd296 | 2417 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
f94e0186 | 2418 | |
834cb0fc SS |
2419 | /* Free any rings that were dropped, but not changed. */ |
2420 | for (i = 1; i < 31; ++i) { | |
4819fef5 ME |
2421 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && |
2422 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) | |
834cb0fc SS |
2423 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
2424 | } | |
d115b048 | 2425 | xhci_zero_in_ctx(xhci, virt_dev); |
834cb0fc SS |
2426 | /* |
2427 | * Install any rings for completely new endpoints or changed endpoints, | |
2428 | * and free or cache any old rings from changed endpoints. | |
2429 | */ | |
f94e0186 | 2430 | for (i = 1; i < 31; ++i) { |
74f9fe21 SS |
2431 | if (!virt_dev->eps[i].new_ring) |
2432 | continue; | |
2433 | /* Only cache or free the old ring if it exists. | |
2434 | * It may not if this is the first add of an endpoint. | |
2435 | */ | |
2436 | if (virt_dev->eps[i].ring) { | |
412566bd | 2437 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
f94e0186 | 2438 | } |
74f9fe21 SS |
2439 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
2440 | virt_dev->eps[i].new_ring = NULL; | |
f94e0186 SS |
2441 | } |
2442 | ||
f94e0186 SS |
2443 | return ret; |
2444 | } | |
2445 | ||
2446 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |
2447 | { | |
f94e0186 SS |
2448 | struct xhci_hcd *xhci; |
2449 | struct xhci_virt_device *virt_dev; | |
2450 | int i, ret; | |
2451 | ||
64927730 | 2452 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
f94e0186 SS |
2453 | if (ret <= 0) |
2454 | return; | |
2455 | xhci = hcd_to_xhci(hcd); | |
2456 | ||
700e2052 | 2457 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
2458 | virt_dev = xhci->devs[udev->slot_id]; |
2459 | /* Free any rings allocated for added endpoints */ | |
2460 | for (i = 0; i < 31; ++i) { | |
63a0d9ab SS |
2461 | if (virt_dev->eps[i].new_ring) { |
2462 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); | |
2463 | virt_dev->eps[i].new_ring = NULL; | |
f94e0186 SS |
2464 | } |
2465 | } | |
d115b048 | 2466 | xhci_zero_in_ctx(xhci, virt_dev); |
f94e0186 SS |
2467 | } |
2468 | ||
5270b951 | 2469 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
913a8a34 SS |
2470 | struct xhci_container_ctx *in_ctx, |
2471 | struct xhci_container_ctx *out_ctx, | |
2472 | u32 add_flags, u32 drop_flags) | |
5270b951 SS |
2473 | { |
2474 | struct xhci_input_control_ctx *ctrl_ctx; | |
913a8a34 | 2475 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
28ccd296 ME |
2476 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
2477 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); | |
913a8a34 | 2478 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
28ccd296 | 2479 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
5270b951 | 2480 | |
913a8a34 SS |
2481 | xhci_dbg(xhci, "Input Context:\n"); |
2482 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); | |
5270b951 SS |
2483 | } |
2484 | ||
8212a49d | 2485 | static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, |
ac9d8fe7 SS |
2486 | unsigned int slot_id, unsigned int ep_index, |
2487 | struct xhci_dequeue_state *deq_state) | |
2488 | { | |
2489 | struct xhci_container_ctx *in_ctx; | |
ac9d8fe7 SS |
2490 | struct xhci_ep_ctx *ep_ctx; |
2491 | u32 added_ctxs; | |
2492 | dma_addr_t addr; | |
2493 | ||
913a8a34 SS |
2494 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, |
2495 | xhci->devs[slot_id]->out_ctx, ep_index); | |
ac9d8fe7 SS |
2496 | in_ctx = xhci->devs[slot_id]->in_ctx; |
2497 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | |
2498 | addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, | |
2499 | deq_state->new_deq_ptr); | |
2500 | if (addr == 0) { | |
2501 | xhci_warn(xhci, "WARN Cannot submit config ep after " | |
2502 | "reset ep command\n"); | |
2503 | xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", | |
2504 | deq_state->new_deq_seg, | |
2505 | deq_state->new_deq_ptr); | |
2506 | return; | |
2507 | } | |
28ccd296 | 2508 | ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); |
ac9d8fe7 | 2509 | |
ac9d8fe7 | 2510 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); |
913a8a34 SS |
2511 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, |
2512 | xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); | |
ac9d8fe7 SS |
2513 | } |
2514 | ||
82d1009f | 2515 | void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, |
63a0d9ab | 2516 | struct usb_device *udev, unsigned int ep_index) |
82d1009f SS |
2517 | { |
2518 | struct xhci_dequeue_state deq_state; | |
63a0d9ab | 2519 | struct xhci_virt_ep *ep; |
82d1009f SS |
2520 | |
2521 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); | |
63a0d9ab | 2522 | ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
82d1009f SS |
2523 | /* We need to move the HW's dequeue pointer past this TD, |
2524 | * or it will attempt to resend it on the next doorbell ring. | |
2525 | */ | |
2526 | xhci_find_new_dequeue_state(xhci, udev->slot_id, | |
e9df17eb | 2527 | ep_index, ep->stopped_stream, ep->stopped_td, |
ac9d8fe7 | 2528 | &deq_state); |
82d1009f | 2529 | |
ac9d8fe7 SS |
2530 | /* HW with the reset endpoint quirk will use the saved dequeue state to |
2531 | * issue a configure endpoint command later. | |
2532 | */ | |
2533 | if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { | |
2534 | xhci_dbg(xhci, "Queueing new dequeue state\n"); | |
63a0d9ab | 2535 | xhci_queue_new_dequeue_state(xhci, udev->slot_id, |
e9df17eb | 2536 | ep_index, ep->stopped_stream, &deq_state); |
ac9d8fe7 SS |
2537 | } else { |
2538 | /* Better hope no one uses the input context between now and the | |
2539 | * reset endpoint completion! | |
e9df17eb SS |
2540 | * XXX: No idea how this hardware will react when stream rings |
2541 | * are enabled. | |
ac9d8fe7 SS |
2542 | */ |
2543 | xhci_dbg(xhci, "Setting up input context for " | |
2544 | "configure endpoint command\n"); | |
2545 | xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, | |
2546 | ep_index, &deq_state); | |
2547 | } | |
82d1009f SS |
2548 | } |
2549 | ||
a1587d97 SS |
2550 | /* Deal with stalled endpoints. The core should have sent the control message |
2551 | * to clear the halt condition. However, we need to make the xHCI hardware | |
2552 | * reset its sequence number, since a device will expect a sequence number of | |
2553 | * zero after the halt condition is cleared. | |
2554 | * Context: in_interrupt | |
2555 | */ | |
2556 | void xhci_endpoint_reset(struct usb_hcd *hcd, | |
2557 | struct usb_host_endpoint *ep) | |
2558 | { | |
2559 | struct xhci_hcd *xhci; | |
2560 | struct usb_device *udev; | |
2561 | unsigned int ep_index; | |
2562 | unsigned long flags; | |
2563 | int ret; | |
63a0d9ab | 2564 | struct xhci_virt_ep *virt_ep; |
a1587d97 SS |
2565 | |
2566 | xhci = hcd_to_xhci(hcd); | |
2567 | udev = (struct usb_device *) ep->hcpriv; | |
2568 | /* Called with a root hub endpoint (or an endpoint that wasn't added | |
2569 | * with xhci_add_endpoint()). |
2570 | */ | |
2571 | if (!ep->hcpriv) | |
2572 | return; | |
2573 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
63a0d9ab SS |
2574 | virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
2575 | if (!virt_ep->stopped_td) { | |
c92bcfa7 SS |
2576 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", |
2577 | ep->desc.bEndpointAddress); | |
2578 | return; | |
2579 | } | |
82d1009f SS |
2580 | if (usb_endpoint_xfer_control(&ep->desc)) { |
2581 | xhci_dbg(xhci, "Control endpoint stall already handled.\n"); | |
2582 | return; | |
2583 | } | |
a1587d97 SS |
2584 | |
2585 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); | |
2586 | spin_lock_irqsave(&xhci->lock, flags); | |
2587 | ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); | |
c92bcfa7 SS |
2588 | /* |
2589 | * Can't change the ring dequeue pointer until it's transitioned to the | |
2590 | * stopped state, which is only upon a successful reset endpoint | |
2591 | * command. Better hope that last command worked! | |
2592 | */ | |
a1587d97 | 2593 | if (!ret) { |
63a0d9ab SS |
2594 | xhci_cleanup_stalled_ring(xhci, udev, ep_index); |
2595 | kfree(virt_ep->stopped_td); | |
a1587d97 SS |
2596 | xhci_ring_cmd_db(xhci); |
2597 | } | |
1624ae1c SS |
2598 | virt_ep->stopped_td = NULL; |
2599 | virt_ep->stopped_trb = NULL; | |
5e5cf6fc | 2600 | virt_ep->stopped_stream = 0; |
a1587d97 SS |
2601 | spin_unlock_irqrestore(&xhci->lock, flags); |
2602 | ||
2603 | if (ret) | |
2604 | xhci_warn(xhci, "FIXME allocate a new ring segment\n"); | |
2605 | } | |
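/*
 * Illustrative call path, assuming the usual USB core plumbing: a class
 * driver that clears a stall ends up here roughly as
 *
 *	usb_clear_halt(udev, pipe);
 *	  -> usb_reset_endpoint(udev, epaddr)
 *	    -> hcd->driver->endpoint_reset(hcd, ep)   [this function]
 */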
2606 | ||
8df75f42 SS |
2607 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
2608 | struct usb_device *udev, struct usb_host_endpoint *ep, | |
2609 | unsigned int slot_id) | |
2610 | { | |
2611 | int ret; | |
2612 | unsigned int ep_index; | |
2613 | unsigned int ep_state; | |
2614 | ||
2615 | if (!ep) | |
2616 | return -EINVAL; | |
64927730 | 2617 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); |
8df75f42 SS |
2618 | if (ret <= 0) |
2619 | return -EINVAL; | |
842f1690 | 2620 | if (ep->ss_ep_comp.bmAttributes == 0) { |
8df75f42 SS |
2621 | xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" |
2622 | " descriptor for ep 0x%x does not support streams\n", | |
2623 | ep->desc.bEndpointAddress); | |
2624 | return -EINVAL; | |
2625 | } | |
2626 | ||
2627 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
2628 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | |
2629 | if (ep_state & EP_HAS_STREAMS || | |
2630 | ep_state & EP_GETTING_STREAMS) { | |
2631 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " | |
2632 | "already has streams set up.\n", | |
2633 | ep->desc.bEndpointAddress); | |
2634 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for " | |
2635 | "dynamic stream context array reallocation.\n"); | |
2636 | return -EINVAL; | |
2637 | } | |
2638 | if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { | |
2639 | xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " | |
2640 | "endpoint 0x%x; URBs are pending.\n", | |
2641 | ep->desc.bEndpointAddress); | |
2642 | return -EINVAL; | |
2643 | } | |
2644 | return 0; | |
2645 | } | |
2646 | ||
2647 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, | |
2648 | unsigned int *num_streams, unsigned int *num_stream_ctxs) | |
2649 | { | |
2650 | unsigned int max_streams; | |
2651 | ||
2652 | /* The stream context array size must be a power of two */ | |
2653 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); | |
2654 | /* | |
2655 | * Find out how many primary stream array entries the host controller | |
2656 | * supports. Later we may use secondary stream arrays (similar to 2nd | |
2657 | * level page entries), but that's an optional feature for xHCI host | |
2658 | * controllers. xHCs must support at least 4 stream IDs. | |
2659 | */ | |
2660 | max_streams = HCC_MAX_PSA(xhci->hcc_params); | |
2661 | if (*num_stream_ctxs > max_streams) { | |
2662 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", | |
2663 | max_streams); | |
2664 | *num_stream_ctxs = max_streams; | |
2665 | *num_streams = max_streams; | |
2666 | } | |
2667 | } | |
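/*
 * Worked example with hypothetical values: a request for 13 stream IDs
 * (including stream 0) is rounded up to *num_stream_ctxs =
 * roundup_pow_of_two(13) = 16. If HCC_MAX_PSA(xhci->hcc_params) reports
 * only 8 supported primary stream array entries, both values are
 * clamped to 8.
 */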
2668 | ||
2669 | /* Returns an error code if one of the endpoints already has streams. |
2670 | * This does not change any data structures, it only checks and gathers | |
2671 | * information. | |
2672 | */ | |
2673 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, | |
2674 | struct usb_device *udev, | |
2675 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
2676 | unsigned int *num_streams, u32 *changed_ep_bitmask) | |
2677 | { | |
8df75f42 SS |
2678 | unsigned int max_streams; |
2679 | unsigned int endpoint_flag; | |
2680 | int i; | |
2681 | int ret; | |
2682 | ||
2683 | for (i = 0; i < num_eps; i++) { | |
2684 | ret = xhci_check_streams_endpoint(xhci, udev, | |
2685 | eps[i], udev->slot_id); | |
2686 | if (ret < 0) | |
2687 | return ret; | |
2688 | ||
842f1690 AS |
2689 | max_streams = USB_SS_MAX_STREAMS( |
2690 | eps[i]->ss_ep_comp.bmAttributes); | |
8df75f42 SS |
2691 | if (max_streams < (*num_streams - 1)) { |
2692 | xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", | |
2693 | eps[i]->desc.bEndpointAddress, | |
2694 | max_streams); | |
2695 | *num_streams = max_streams+1; | |
2696 | } | |
2697 | ||
2698 | endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); | |
2699 | if (*changed_ep_bitmask & endpoint_flag) | |
2700 | return -EINVAL; | |
2701 | *changed_ep_bitmask |= endpoint_flag; | |
2702 | } | |
2703 | return 0; | |
2704 | } | |
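/*
 * Illustrative decoding (assuming the ch9.h USB_SS_MAX_STREAMS()
 * definition of 2^bmAttributes[4:0]): a companion descriptor with
 * bmAttributes == 4 supports 2^4 == 16 stream IDs, so a request for
 * 32 driver-visible IDs (*num_streams == 33 counting stream 0) would
 * be trimmed by the check above to *num_streams = 17.
 */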
2705 | ||
2706 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, | |
2707 | struct usb_device *udev, | |
2708 | struct usb_host_endpoint **eps, unsigned int num_eps) | |
2709 | { | |
2710 | u32 changed_ep_bitmask = 0; | |
2711 | unsigned int slot_id; | |
2712 | unsigned int ep_index; | |
2713 | unsigned int ep_state; | |
2714 | int i; | |
2715 | ||
2716 | slot_id = udev->slot_id; | |
2717 | if (!xhci->devs[slot_id]) | |
2718 | return 0; | |
2719 | ||
2720 | for (i = 0; i < num_eps; i++) { | |
2721 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
2722 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | |
2723 | /* Are streams already being freed for the endpoint? */ | |
2724 | if (ep_state & EP_GETTING_NO_STREAMS) { | |
2725 | xhci_warn(xhci, "WARN Can't disable streams for " | |
2726 | "endpoint 0x%x\n, " | |
2727 | "streams are being disabled already.", | |
2728 | eps[i]->desc.bEndpointAddress); | |
2729 | return 0; | |
2730 | } | |
2731 | /* Are there actually any streams to free? */ | |
2732 | if (!(ep_state & EP_HAS_STREAMS) && | |
2733 | !(ep_state & EP_GETTING_STREAMS)) { | |
2734 | xhci_warn(xhci, "WARN Can't disable streams for " | |
2735 | "endpoint 0x%x\n, " | |
2736 | "streams are already disabled!", | |
2737 | eps[i]->desc.bEndpointAddress); | |
2738 | xhci_warn(xhci, "WARN xhci_free_streams() called " | |
2739 | "with non-streams endpoint\n"); | |
2740 | return 0; | |
2741 | } | |
2742 | changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); | |
2743 | } | |
2744 | return changed_ep_bitmask; | |
2745 | } | |
2746 | ||
2747 | /* | |
2748 | * The USB device drivers use this function (through the HCD interface in USB |
2749 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to | |
2750 | * coordinate mass storage command queueing across multiple endpoints (basically | |
2751 | * a stream ID == a task ID). | |
2752 | * | |
2753 | * Setting up streams involves allocating the same size stream context array | |
2754 | * for each endpoint and issuing a configure endpoint command for all endpoints. | |
2755 | * | |
2756 | * Don't allow the call to succeed if one endpoint only supports one stream | |
2757 | * (which means it doesn't support streams at all). | |
2758 | * | |
2759 | * Drivers may get fewer stream IDs than they asked for, if the host controller |
2760 | * hardware or endpoints claim they can't support the number of requested | |
2761 | * stream IDs. | |
2762 | */ | |
2763 | int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, | |
2764 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
2765 | unsigned int num_streams, gfp_t mem_flags) | |
2766 | { | |
2767 | int i, ret; | |
2768 | struct xhci_hcd *xhci; | |
2769 | struct xhci_virt_device *vdev; | |
2770 | struct xhci_command *config_cmd; | |
2771 | unsigned int ep_index; | |
2772 | unsigned int num_stream_ctxs; | |
2773 | unsigned long flags; | |
2774 | u32 changed_ep_bitmask = 0; | |
2775 | ||
2776 | if (!eps) | |
2777 | return -EINVAL; | |
2778 | ||
2779 | /* Add one to the number of streams requested to account for | |
2780 | * stream 0 that is reserved for xHCI usage. | |
2781 | */ | |
2782 | num_streams += 1; | |
2783 | xhci = hcd_to_xhci(hcd); | |
2784 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", | |
2785 | num_streams); | |
2786 | ||
2787 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); | |
2788 | if (!config_cmd) { | |
2789 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); | |
2790 | return -ENOMEM; | |
2791 | } | |
2792 | ||
2793 | /* Check to make sure none of the endpoints are already configured for |
2794 | * streams. While we're at it, find the maximum number of streams that | |
2795 | * all the endpoints will support and check for duplicate endpoints. | |
2796 | */ | |
2797 | spin_lock_irqsave(&xhci->lock, flags); | |
2798 | ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, | |
2799 | num_eps, &num_streams, &changed_ep_bitmask); | |
2800 | if (ret < 0) { | |
2801 | xhci_free_command(xhci, config_cmd); | |
2802 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2803 | return ret; | |
2804 | } | |
2805 | if (num_streams <= 1) { | |
2806 | xhci_warn(xhci, "WARN: endpoints can't handle " | |
2807 | "more than one stream.\n"); | |
2808 | xhci_free_command(xhci, config_cmd); | |
2809 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2810 | return -EINVAL; | |
2811 | } | |
2812 | vdev = xhci->devs[udev->slot_id]; | |
25985edc | 2813 | /* Mark each endpoint as being in transition, so |
8df75f42 SS |
2814 | * xhci_urb_enqueue() will reject all URBs. |
2815 | */ | |
2816 | for (i = 0; i < num_eps; i++) { | |
2817 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
2818 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; | |
2819 | } | |
2820 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2821 | ||
2822 | /* Setup internal data structures and allocate HW data structures for | |
2823 | * streams (but don't install the HW structures in the input context | |
2824 | * until we're sure all memory allocation succeeded). | |
2825 | */ | |
2826 | xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); | |
2827 | xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", | |
2828 | num_stream_ctxs, num_streams); | |
2829 | ||
2830 | for (i = 0; i < num_eps; i++) { | |
2831 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
2832 | vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, | |
2833 | num_stream_ctxs, | |
2834 | num_streams, mem_flags); | |
2835 | if (!vdev->eps[ep_index].stream_info) | |
2836 | goto cleanup; | |
2837 | /* Set maxPstreams in endpoint context and update deq ptr to | |
2838 | * point to stream context array. FIXME | |
2839 | */ | |
2840 | } | |
2841 | ||
2842 | /* Set up the input context for a configure endpoint command. */ | |
2843 | for (i = 0; i < num_eps; i++) { | |
2844 | struct xhci_ep_ctx *ep_ctx; | |
2845 | ||
2846 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
2847 | ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); | |
2848 | ||
2849 | xhci_endpoint_copy(xhci, config_cmd->in_ctx, | |
2850 | vdev->out_ctx, ep_index); | |
2851 | xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, | |
2852 | vdev->eps[ep_index].stream_info); | |
2853 | } | |
2854 | /* Tell the HW to drop its old copy of the endpoint context info | |
2855 | * and add the updated copy from the input context. | |
2856 | */ | |
2857 | xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, | |
2858 | vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); | |
2859 | ||
2860 | /* Issue and wait for the configure endpoint command */ | |
2861 | ret = xhci_configure_endpoint(xhci, udev, config_cmd, | |
2862 | false, false); | |
2863 | ||
2864 | /* xHC rejected the configure endpoint command for some reason, so we | |
2865 | * leave the old ring intact and free our internal streams data | |
2866 | * structure. | |
2867 | */ | |
2868 | if (ret < 0) | |
2869 | goto cleanup; | |
2870 | ||
2871 | spin_lock_irqsave(&xhci->lock, flags); | |
2872 | for (i = 0; i < num_eps; i++) { | |
2873 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
2874 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; | |
2875 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", | |
2876 | udev->slot_id, ep_index); | |
2877 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; | |
2878 | } | |
2879 | xhci_free_command(xhci, config_cmd); | |
2880 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2881 | ||
2882 | /* Subtract 1 for stream 0, which drivers can't use */ | |
2883 | return num_streams - 1; | |
2884 | ||
2885 | cleanup: | |
2886 | /* If it didn't work, free the streams! */ | |
2887 | for (i = 0; i < num_eps; i++) { | |
2888 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
2889 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); | |
8a007748 | 2890 | vdev->eps[ep_index].stream_info = NULL; |
8df75f42 SS |
2891 | /* FIXME Unset maxPstreams in endpoint context and |
2892 | * update deq ptr to point to the normal endpoint ring. |
2893 | */ | |
2894 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; | |
2895 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; | |
2896 | xhci_endpoint_zero(xhci, vdev, eps[i]); | |
2897 | } | |
2898 | xhci_free_command(xhci, config_cmd); | |
2899 | return -ENOMEM; | |
2900 | } | |
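/*
 * Usage sketch (hypothetical caller, not part of this file): a class
 * driver reaches this through the USB core wrapper rather than calling
 * the hc_driver op directly, e.g.:
 *
 *	num_ids = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (num_ids < 0)
 *		...fall back to ordinary (non-stream) bulk transfers...
 *
 * where a non-negative return is the number of usable stream IDs,
 * which may be fewer than requested.
 */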
2901 | ||
2902 | /* Transition the endpoint from using streams to being a "normal" endpoint | |
2903 | * without streams. | |
2904 | * | |
2905 | * Modify the endpoint context state, submit a configure endpoint command, | |
2906 | * and free all endpoint rings for streams if that completes successfully. | |
2907 | */ | |
2908 | int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, | |
2909 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
2910 | gfp_t mem_flags) | |
2911 | { | |
2912 | int i, ret; | |
2913 | struct xhci_hcd *xhci; | |
2914 | struct xhci_virt_device *vdev; | |
2915 | struct xhci_command *command; | |
2916 | unsigned int ep_index; | |
2917 | unsigned long flags; | |
2918 | u32 changed_ep_bitmask; | |
2919 | ||
2920 | xhci = hcd_to_xhci(hcd); | |
2921 | vdev = xhci->devs[udev->slot_id]; | |
2922 | ||
2923 | /* Set up a configure endpoint command to remove the streams rings */ | |
2924 | spin_lock_irqsave(&xhci->lock, flags); | |
2925 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, | |
2926 | udev, eps, num_eps); | |
2927 | if (changed_ep_bitmask == 0) { | |
2928 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2929 | return -EINVAL; | |
2930 | } | |
2931 | ||
2932 | /* Use the xhci_command structure from the first endpoint. We may have | |
2933 | * allocated too many, but the driver may call xhci_free_streams() for | |
2934 | * each endpoint it grouped into one call to xhci_alloc_streams(). | |
2935 | */ | |
2936 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); | |
2937 | command = vdev->eps[ep_index].stream_info->free_streams_command; | |
2938 | for (i = 0; i < num_eps; i++) { | |
2939 | struct xhci_ep_ctx *ep_ctx; | |
2940 | ||
2941 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
2942 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); | |
2943 | xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= | |
2944 | EP_GETTING_NO_STREAMS; | |
2945 | ||
2946 | xhci_endpoint_copy(xhci, command->in_ctx, | |
2947 | vdev->out_ctx, ep_index); | |
2948 | xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, | |
2949 | &vdev->eps[ep_index]); | |
2950 | } | |
2951 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, | |
2952 | vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); | |
2953 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2954 | ||
2955 | /* Issue and wait for the configure endpoint command, | |
2956 | * which must succeed. | |
2957 | */ | |
2958 | ret = xhci_configure_endpoint(xhci, udev, command, | |
2959 | false, true); | |
2960 | ||
2961 | /* xHC rejected the configure endpoint command for some reason, so we | |
2962 | * leave the streams rings intact. | |
2963 | */ | |
2964 | if (ret < 0) | |
2965 | return ret; | |
2966 | ||
2967 | spin_lock_irqsave(&xhci->lock, flags); | |
2968 | for (i = 0; i < num_eps; i++) { | |
2969 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
2970 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); | |
8a007748 | 2971 | vdev->eps[ep_index].stream_info = NULL; |
8df75f42 SS |
2972 | /* FIXME Unset maxPstreams in endpoint context and |
2973 | * update deq ptr to point to the normal endpoint ring. |
2974 | */ | |
2975 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; | |
2976 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; | |
2977 | } | |
2978 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2979 | ||
2980 | return 0; | |
2981 | } | |
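/*
 * Matching teardown sketch (hypothetical caller): the same endpoint
 * group handed to usb_alloc_streams() would later be released through
 * the core wrapper:
 *
 *	usb_free_streams(intf, eps, num_eps, GFP_NOIO);
 */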
2982 | ||
2cf95c18 SS |
2983 | /* |
2984 | * Deletes endpoint resources for endpoints that were active before a Reset | |
2985 | * Device command, or a Disable Slot command. The Reset Device command leaves | |
2986 | * the control endpoint intact, whereas the Disable Slot command deletes it. | |
2987 | * | |
2988 | * Must be called with xhci->lock held. | |
2989 | */ | |
2990 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, | |
2991 | struct xhci_virt_device *virt_dev, bool drop_control_ep) | |
2992 | { | |
2993 | int i; | |
2994 | unsigned int num_dropped_eps = 0; | |
2995 | unsigned int drop_flags = 0; | |
2996 | ||
2997 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { | |
2998 | if (virt_dev->eps[i].ring) { | |
2999 | drop_flags |= 1 << i; | |
3000 | num_dropped_eps++; | |
3001 | } | |
3002 | } | |
3003 | xhci->num_active_eps -= num_dropped_eps; | |
3004 | if (num_dropped_eps) | |
3005 | xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " | |
3006 | "%u now active.\n", | |
3007 | num_dropped_eps, drop_flags, | |
3008 | xhci->num_active_eps); | |
3009 | } | |
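/*
 * Bitmask example (pure arithmetic on the loop above): if only
 * endpoint contexts 1 and 3 still have rings, drop_flags ends up as
 * (1 << 1) | (1 << 3) == 0x0a and num_dropped_eps == 2.
 */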
3010 | ||
2a8f82c4 SS |
3011 | /* |
3012 | * This submits a Reset Device Command, which will set the device state to 0, | |
3013 | * set the device address to 0, and disable all the endpoints except the default | |
3014 | * control endpoint. The USB core should come back and call | |
3015 | * xhci_address_device(), and then re-set up the configuration. If this is | |
3016 | * called because of a usb_reset_and_verify_device(), then the old alternate | |
3017 | * settings will be re-installed through the normal bandwidth allocation | |
3018 | * functions. | |
3019 | * | |
3020 | * Wait for the Reset Device command to finish. Remove all structures | |
3021 | * associated with the endpoints that were disabled. Clear the input device | |
3022 | * structure? Cache the rings? Reset the control endpoint 0 max packet size? | |
f0615c45 AX |
3023 | * |
3024 | * If the virt_dev to be reset does not exist or does not match the udev, | |
3025 | * it means the device has been lost, possibly due to an xHC restore error and |
3026 | * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to | |
3027 | * re-allocate the device. | |
2a8f82c4 | 3028 | */ |
f0615c45 | 3029 | int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) |
2a8f82c4 SS |
3030 | { |
3031 | int ret, i; | |
3032 | unsigned long flags; | |
3033 | struct xhci_hcd *xhci; | |
3034 | unsigned int slot_id; | |
3035 | struct xhci_virt_device *virt_dev; | |
3036 | struct xhci_command *reset_device_cmd; | |
3037 | int timeleft; | |
3038 | int last_freed_endpoint; | |
001fd382 | 3039 | struct xhci_slot_ctx *slot_ctx; |
2e27980e | 3040 | int old_active_eps = 0; |
2a8f82c4 | 3041 | |
f0615c45 | 3042 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); |
2a8f82c4 SS |
3043 | if (ret <= 0) |
3044 | return ret; | |
3045 | xhci = hcd_to_xhci(hcd); | |
3046 | slot_id = udev->slot_id; | |
3047 | virt_dev = xhci->devs[slot_id]; | |
f0615c45 AX |
3048 | if (!virt_dev) { |
3049 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " | |
3050 | "not exist. Re-allocate the device\n", slot_id); | |
3051 | ret = xhci_alloc_dev(hcd, udev); | |
3052 | if (ret == 1) | |
3053 | return 0; | |
3054 | else | |
3055 | return -EINVAL; | |
3056 | } | |
3057 | ||
3058 | if (virt_dev->udev != udev) { | |
3059 | /* If the virt_dev and the udev do not match, this virt_dev |
3060 | * may belong to another udev. | |
3061 | * Re-allocate the device. | |
3062 | */ | |
3063 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " | |
3064 | "not match the udev. Re-allocate the device\n", | |
3065 | slot_id); | |
3066 | ret = xhci_alloc_dev(hcd, udev); | |
3067 | if (ret == 1) | |
3068 | return 0; | |
3069 | else | |
3070 | return -EINVAL; | |
3071 | } | |
2a8f82c4 | 3072 | |
001fd382 ML |
3073 | /* If the device is not set up, there is no point in resetting it */ |
3074 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | |
3075 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == | |
3076 | SLOT_STATE_DISABLED) | |
3077 | return 0; | |
3078 | ||
2a8f82c4 SS |
3079 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); |
3080 | /* Allocate the command structure that holds the struct completion. | |
3081 | * Assume we're in process context, since the normal device reset | |
3082 | * process has to wait for the device anyway. Storage devices are | |
3083 | * reset as part of error handling, so use GFP_NOIO instead of | |
3084 | * GFP_KERNEL. | |
3085 | */ | |
3086 | reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); | |
3087 | if (!reset_device_cmd) { | |
3088 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); | |
3089 | return -ENOMEM; | |
3090 | } | |
3091 | ||
3092 | /* Attempt to submit the Reset Device command to the command ring */ | |
3093 | spin_lock_irqsave(&xhci->lock, flags); | |
3094 | reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; | |
7a3783ef PZ |
3095 | |
3096 | /* The enqueue pointer can be left pointing to the link TRB, |
3097 | * so we must handle that case. |
3098 | */ | |
f5960b69 | 3099 | if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control)) |
7a3783ef PZ |
3100 | reset_device_cmd->command_trb = |
3101 | xhci->cmd_ring->enq_seg->next->trbs; | |
3102 | ||
2a8f82c4 SS |
3103 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); |
3104 | ret = xhci_queue_reset_device(xhci, slot_id); | |
3105 | if (ret) { | |
3106 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
3107 | list_del(&reset_device_cmd->cmd_list); | |
3108 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3109 | goto command_cleanup; | |
3110 | } | |
3111 | xhci_ring_cmd_db(xhci); | |
3112 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3113 | ||
3114 | /* Wait for the Reset Device command to finish */ | |
3115 | timeleft = wait_for_completion_interruptible_timeout( | |
3116 | reset_device_cmd->completion, | |
3117 | USB_CTRL_SET_TIMEOUT); | |
3118 | if (timeleft <= 0) { | |
3119 | xhci_warn(xhci, "%s while waiting for reset device command\n", | |
3120 | timeleft == 0 ? "Timeout" : "Signal"); | |
3121 | spin_lock_irqsave(&xhci->lock, flags); | |
3122 | /* The timeout might have raced with the event ring handler, so | |
3123 | * only delete from the list if the item isn't poisoned. | |
3124 | */ | |
3125 | if (reset_device_cmd->cmd_list.next != LIST_POISON1) | |
3126 | list_del(&reset_device_cmd->cmd_list); | |
3127 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3128 | ret = -ETIME; | |
3129 | goto command_cleanup; | |
3130 | } | |
3131 | ||
3132 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, | |
3133 | * unless we tried to reset a slot ID that wasn't enabled, | |
3134 | * or the device wasn't in the addressed or configured state. | |
3135 | */ | |
3136 | ret = reset_device_cmd->status; | |
3137 | switch (ret) { | |
3138 | case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ | |
3139 | case COMP_CTX_STATE: /* 0.96 completion code for same thing */ | |
3140 | xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", | |
3141 | slot_id, | |
3142 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); | |
3143 | xhci_info(xhci, "Not freeing device rings.\n"); | |
3144 | /* Don't treat this as an error. May change my mind later. */ | |
3145 | ret = 0; | |
3146 | goto command_cleanup; | |
3147 | case COMP_SUCCESS: | |
3148 | xhci_dbg(xhci, "Successful reset device command.\n"); | |
3149 | break; | |
3150 | default: | |
3151 | if (xhci_is_vendor_info_code(xhci, ret)) | |
3152 | break; | |
3153 | xhci_warn(xhci, "Unknown completion code %u for " | |
3154 | "reset device command.\n", ret); | |
3155 | ret = -EINVAL; | |
3156 | goto command_cleanup; | |
3157 | } | |
3158 | ||
2cf95c18 SS |
3159 | /* Free up host controller endpoint resources */ |
3160 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | |
3161 | spin_lock_irqsave(&xhci->lock, flags); | |
3162 | /* Don't delete the default control endpoint resources */ | |
3163 | xhci_free_device_endpoint_resources(xhci, virt_dev, false); | |
3164 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3165 | } | |
3166 | ||
2a8f82c4 SS |
3167 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ |
3168 | last_freed_endpoint = 1; | |
3169 | for (i = 1; i < 31; ++i) { | |
2dea75d9 DT |
3170 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
3171 | ||
3172 | if (ep->ep_state & EP_HAS_STREAMS) { | |
3173 | xhci_free_stream_info(xhci, ep->stream_info); | |
3174 | ep->stream_info = NULL; | |
3175 | ep->ep_state &= ~EP_HAS_STREAMS; | |
3176 | } | |
3177 | ||
3178 | if (ep->ring) { | |
3179 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | |
3180 | last_freed_endpoint = i; | |
3181 | } | |
2e27980e SS |
3182 | if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) |
3183 | xhci_drop_ep_from_interval_table(xhci, | |
3184 | &virt_dev->eps[i].bw_info, | |
3185 | virt_dev->bw_table, | |
3186 | udev, | |
3187 | &virt_dev->eps[i], | |
3188 | virt_dev->tt_info); | |
9af5d71d | 3189 | xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); |
2a8f82c4 | 3190 | } |
2e27980e SS |
3191 | /* If necessary, update the number of active TTs on this root port */ |
3192 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); | |
3193 | ||
2a8f82c4 SS |
3194 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); |
3195 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); | |
3196 | ret = 0; | |
3197 | ||
3198 | command_cleanup: | |
3199 | xhci_free_command(xhci, reset_device_cmd); | |
3200 | return ret; | |
3201 | } | |
3202 | ||
3ffbba95 SS |
3203 | /* |
3204 | * At this point, the struct usb_device is about to go away, the device has | |
3205 | * disconnected, and all traffic has been stopped and the endpoints have been | |
3206 | * disabled. Free any HC data structures associated with that device. | |
3207 | */ | |
3208 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |
3209 | { | |
3210 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
6f5165cf | 3211 | struct xhci_virt_device *virt_dev; |
3ffbba95 | 3212 | unsigned long flags; |
c526d0d4 | 3213 | u32 state; |
64927730 | 3214 | int i, ret; |
3ffbba95 | 3215 | |
64927730 | 3216 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
7bd89b40 SS |
3217 | /* If the host is halted due to driver unload, we still need to free the |
3218 | * device. | |
3219 | */ | |
3220 | if (ret <= 0 && ret != -ENODEV) | |
3ffbba95 | 3221 | return; |
64927730 | 3222 | |
6f5165cf | 3223 | virt_dev = xhci->devs[udev->slot_id]; |
6f5165cf SS |
3224 | |
3225 | /* Stop any wayward timer functions (which may grab the lock) */ | |
3226 | for (i = 0; i < 31; ++i) { | |
3227 | virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; | |
3228 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | |
3229 | } | |
3ffbba95 SS |
3230 | |
3231 | spin_lock_irqsave(&xhci->lock, flags); | |
c526d0d4 SS |
3232 | /* Don't disable the slot if the host controller is dead. */ |
3233 | state = xhci_readl(xhci, &xhci->op_regs->status); | |
7bd89b40 SS |
3234 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
3235 | (xhci->xhc_state & XHCI_STATE_HALTED)) { | |
c526d0d4 SS |
3236 | xhci_free_virt_device(xhci, udev->slot_id); |
3237 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3238 | return; | |
3239 | } | |
3240 | ||
23e3be11 | 3241 | if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { |
3ffbba95 SS |
3242 | spin_unlock_irqrestore(&xhci->lock, flags); |
3243 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
3244 | return; | |
3245 | } | |
23e3be11 | 3246 | xhci_ring_cmd_db(xhci); |
3ffbba95 SS |
3247 | spin_unlock_irqrestore(&xhci->lock, flags); |
3248 | /* | |
3249 | * Event command completion handler will free any data structures | |
f88ba78d | 3250 | * associated with the slot. XXX Can free sleep? |
3ffbba95 SS |
3251 | */ |
3252 | } | |
3253 | ||
2cf95c18 SS |
3254 | /* |
3255 | * Checks if we have enough host controller resources for the default control | |
3256 | * endpoint. | |
3257 | * | |
3258 | * Must be called with xhci->lock held. | |
3259 | */ | |
3260 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) | |
3261 | { | |
3262 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { | |
3263 | xhci_dbg(xhci, "Not enough ep ctxs: " | |
3264 | "%u active, need to add 1, limit is %u.\n", | |
3265 | xhci->num_active_eps, xhci->limit_active_eps); | |
3266 | return -ENOMEM; | |
3267 | } | |
3268 | xhci->num_active_eps += 1; | |
3269 | xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", | |
3270 | xhci->num_active_eps); | |
3271 | return 0; | |
3272 | } | |
3273 | ||
3274 | ||
3ffbba95 SS |
3275 | /* |
3276 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command | |
3277 | * timed out, or allocating memory failed. Returns 1 on success. | |
3278 | */ | |
3279 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | |
3280 | { | |
3281 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
3282 | unsigned long flags; | |
3283 | int timeleft; | |
3284 | int ret; | |
3285 | ||
3286 | spin_lock_irqsave(&xhci->lock, flags); | |
23e3be11 | 3287 | ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); |
3ffbba95 SS |
3288 | if (ret) { |
3289 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3290 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
3291 | return 0; | |
3292 | } | |
23e3be11 | 3293 | xhci_ring_cmd_db(xhci); |
3ffbba95 SS |
3294 | spin_unlock_irqrestore(&xhci->lock, flags); |
3295 | ||
3296 | /* XXX: how much time for xHC slot assignment? */ | |
3297 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, | |
3298 | USB_CTRL_SET_TIMEOUT); | |
3299 | if (timeleft <= 0) { | |
3300 | xhci_warn(xhci, "%s while waiting for a slot\n", | |
3301 | timeleft == 0 ? "Timeout" : "Signal"); | |
3302 | /* FIXME cancel the enable slot request */ | |
3303 | return 0; | |
3304 | } | |
3305 | ||
3ffbba95 SS |
3306 | if (!xhci->slot_id) { |
3307 | xhci_err(xhci, "Error while assigning device slot ID\n"); | |
3ffbba95 SS |
3308 | return 0; |
3309 | } | |
2cf95c18 SS |
3310 | |
3311 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | |
3312 | spin_lock_irqsave(&xhci->lock, flags); | |
3313 | ret = xhci_reserve_host_control_ep_resources(xhci); | |
3314 | if (ret) { | |
3315 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3316 | xhci_warn(xhci, "Not enough host resources, " | |
3317 | "active endpoint contexts = %u\n", | |
3318 | xhci->num_active_eps); | |
3319 | goto disable_slot; | |
3320 | } | |
3321 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3322 | } | |
3323 | /* Use GFP_NOIO, since this function can be called from | |
a6d940dd SS |
3324 | * xhci_discover_or_reset_device(), which may be called as part of |
3325 | * mass storage driver error handling. | |
3326 | */ | |
3327 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { | |
3ffbba95 | 3328 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); |
2cf95c18 | 3329 | goto disable_slot; |
3ffbba95 SS |
3330 | } |
3331 | udev->slot_id = xhci->slot_id; | |
3332 | /* Is this a LS or FS device under a HS hub? */ | |
3333 | /* Hub or peripheral? */ |
3ffbba95 | 3334 | return 1; |
2cf95c18 SS |
3335 | |
3336 | disable_slot: | |
3337 | /* Disable slot, if we can do it without mem alloc */ | |
3338 | spin_lock_irqsave(&xhci->lock, flags); | |
3339 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) | |
3340 | xhci_ring_cmd_db(xhci); | |
3341 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3342 | return 0; | |
3ffbba95 SS |
3343 | } |
3344 | ||
3345 | /* | |
3346 | * Issue an Address Device command (which will issue a SetAddress request to | |
3347 | * the device). | |
3348 | * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so | |
3349 | * we should only issue and wait on one address command at a time. |
3350 | * | |
3351 | * We add one to the device address issued by the hardware because the USB core | |
3352 | * uses address 1 for the root hubs (even though they're not really devices). | |
3353 | */ | |
3354 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |
3355 | { | |
3356 | unsigned long flags; | |
3357 | int timeleft; | |
3358 | struct xhci_virt_device *virt_dev; | |
3359 | int ret = 0; | |
3360 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
d115b048 JY |
3361 | struct xhci_slot_ctx *slot_ctx; |
3362 | struct xhci_input_control_ctx *ctrl_ctx; | |
8e595a5d | 3363 | u64 temp_64; |
3ffbba95 SS |
3364 | |
3365 | if (!udev->slot_id) { | |
3366 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); | |
3367 | return -EINVAL; | |
3368 | } | |
3369 | ||
3ffbba95 SS |
3370 | virt_dev = xhci->devs[udev->slot_id]; |
3371 | ||
7ed603ec ME |
3372 | if (WARN_ON(!virt_dev)) { |
3373 | /* | |
3374 | * In plug/unplug torture tests with an NEC controller, |
3375 | * a NULL-pointer dereference was once observed because virt_dev was NULL. |
3376 | * Print useful debug rather than crash if it is observed again! | |
3377 | */ | |
3378 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", | |
3379 | udev->slot_id); | |
3380 | return -EINVAL; | |
3381 | } | |
3382 | ||
f0615c45 AX |
3383 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
3384 | /* | |
3385 | * If this is the first Set Address since device plug-in or | |
3386 | * virt_device realloaction after a resume with an xHCI power loss, | |
3387 | * then set up the slot context. | |
3388 | */ | |
3389 | if (!slot_ctx->dev_info) | |
3ffbba95 | 3390 | xhci_setup_addressable_virt_dev(xhci, udev); |
f0615c45 | 3391 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
2d1ee590 SS |
3392 | else |
3393 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); | |
66e49d87 | 3394 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
d115b048 | 3395 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
3ffbba95 | 3396 | |
f88ba78d | 3397 | spin_lock_irqsave(&xhci->lock, flags); |
d115b048 JY |
3398 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, |
3399 | udev->slot_id); | |
3ffbba95 SS |
3400 | if (ret) { |
3401 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3402 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
3403 | return ret; | |
3404 | } | |
23e3be11 | 3405 | xhci_ring_cmd_db(xhci); |
3ffbba95 SS |
3406 | spin_unlock_irqrestore(&xhci->lock, flags); |
3407 | ||
3408 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ | |
3409 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, | |
3410 | USB_CTRL_SET_TIMEOUT); | |
3411 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing |
3412 | * the SetAddress() 'recovery interval' required by USB", and for aborting |
3413 | * the command on a timeout. |
3414 | */ | |
3415 | if (timeleft <= 0) { | |
3416 | xhci_warn(xhci, "%s while waiting for a slot\n", | |
3417 | timeleft == 0 ? "Timeout" : "Signal"); | |
3418 | /* FIXME cancel the address device command */ | |
3419 | return -ETIME; | |
3420 | } | |
3421 | ||
3ffbba95 SS |
3422 | switch (virt_dev->cmd_status) { |
3423 | case COMP_CTX_STATE: | |
3424 | case COMP_EBADSLT: | |
3425 | xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", | |
3426 | udev->slot_id); | |
3427 | ret = -EINVAL; | |
3428 | break; | |
3429 | case COMP_TX_ERR: | |
3430 | dev_warn(&udev->dev, "Device not responding to set address.\n"); | |
3431 | ret = -EPROTO; | |
3432 | break; | |
f6ba6fe2 AH |
3433 | case COMP_DEV_ERR: |
3434 | dev_warn(&udev->dev, "ERROR: Incompatible device for address " | |
3435 | "device command.\n"); | |
3436 | ret = -ENODEV; | |
3437 | break; | |
3ffbba95 SS |
3438 | case COMP_SUCCESS: |
3439 | xhci_dbg(xhci, "Successful Address Device command\n"); | |
3440 | break; | |
3441 | default: | |
3442 | xhci_err(xhci, "ERROR: unexpected command completion " | |
3443 | "code 0x%x.\n", virt_dev->cmd_status); | |
66e49d87 | 3444 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
d115b048 | 3445 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
3ffbba95 SS |
3446 | ret = -EINVAL; |
3447 | break; | |
3448 | } | |
3449 | if (ret) { | |
3ffbba95 SS |
3450 | return ret; |
3451 | } | |
8e595a5d SS |
3452 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
3453 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); | |
3454 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", | |
28ccd296 ME |
3455 | udev->slot_id, |
3456 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], | |
3457 | (unsigned long long) | |
3458 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); | |
700e2052 | 3459 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", |
d115b048 | 3460 | (unsigned long long)virt_dev->out_ctx->dma); |
3ffbba95 | 3461 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
d115b048 | 3462 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
3ffbba95 | 3463 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
d115b048 | 3464 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
3ffbba95 SS |
3465 | /* |
3466 | * USB core uses address 1 for the roothubs, so we add one to the | |
3467 | * address given back to us by the HC. | |
3468 | */ | |
d115b048 | 3469 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
c8d4af8e AX |
3470 | /* Use kernel assigned address for devices; store xHC assigned |
3471 | * address locally. */ | |
28ccd296 ME |
3472 | virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) |
3473 | + 1; | |
f94e0186 | 3474 | /* Zero the input context control for later use */ |
d115b048 JY |
3475 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
3476 | ctrl_ctx->add_flags = 0; | |
3477 | ctrl_ctx->drop_flags = 0; | |
3ffbba95 | 3478 | |
c8d4af8e | 3479 | xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address); |
3ffbba95 SS |
3480 | |
3481 | return 0; | |
3482 | } | |
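/*
 * Address bookkeeping example (illustrative): if the xHC reports device
 * address 1 in slot_ctx->dev_state, virt_dev->address becomes 2,
 * matching the USB core's numbering in which the root hub itself holds
 * address 1.
 */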
3483 | ||
ac1c1b7f SS |
3484 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
3485 | * internal data structures for the device. | |
3486 | */ | |
3487 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, | |
3488 | struct usb_tt *tt, gfp_t mem_flags) | |
3489 | { | |
3490 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
3491 | struct xhci_virt_device *vdev; | |
3492 | struct xhci_command *config_cmd; | |
3493 | struct xhci_input_control_ctx *ctrl_ctx; | |
3494 | struct xhci_slot_ctx *slot_ctx; | |
3495 | unsigned long flags; | |
3496 | unsigned think_time; | |
3497 | int ret; | |
3498 | ||
3499 | /* Ignore root hubs */ | |
3500 | if (!hdev->parent) | |
3501 | return 0; | |
3502 | ||
3503 | vdev = xhci->devs[hdev->slot_id]; | |
3504 | if (!vdev) { | |
3505 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); | |
3506 | return -EINVAL; | |
3507 | } | |
a1d78c16 | 3508 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); |
ac1c1b7f SS |
3509 | if (!config_cmd) { |
3510 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); | |
3511 | return -ENOMEM; | |
3512 | } | |
3513 | ||
3514 | spin_lock_irqsave(&xhci->lock, flags); | |
839c817c SS |
3515 | if (hdev->speed == USB_SPEED_HIGH && |
3516 | xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { | |
3517 | xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); | |
3518 | xhci_free_command(xhci, config_cmd); | |
3519 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3520 | return -ENOMEM; | |
3521 | } | |
3522 | ||
ac1c1b7f SS |
3523 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
3524 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); | |
28ccd296 | 3525 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
ac1c1b7f | 3526 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
28ccd296 | 3527 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
ac1c1b7f | 3528 | if (tt->multi) |
28ccd296 | 3529 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
ac1c1b7f SS |
3530 | if (xhci->hci_version > 0x95) { |
3531 | xhci_dbg(xhci, "xHCI version %x needs hub " | |
3532 | "TT think time and number of ports\n", | |
3533 | (unsigned int) xhci->hci_version); | |
28ccd296 | 3534 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
ac1c1b7f SS |
3535 | /* Set TT think time - convert from ns to FS bit times. |
3536 | * 0 = 8 FS bit times, 1 = 16 FS bit times, | |
3537 | * 2 = 24 FS bit times, 3 = 32 FS bit times. | |
700b4173 AX |
3538 | * |
3539 | * xHCI 1.0: this field shall be 0 if the device is not a | |
3540 | * High-speed hub. |
ac1c1b7f SS |
3541 | */ |
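/* tt->think_time is in nanoseconds; one full-speed bit time is
 * 1/12 MHz ~= 83.3 ns, so 8 bit times ~= 666 ns.  Dividing by 666 and
 * subtracting one maps the values the USB core hub driver reports
 * (666, 1332, 1998, or 2664 ns) onto the encodings 0-3 above.
 */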
3542 | think_time = tt->think_time; | |
3543 | if (think_time != 0) | |
3544 | think_time = (think_time / 666) - 1; | |
700b4173 AX |
3545 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
3546 | slot_ctx->tt_info |= | |
3547 | cpu_to_le32(TT_THINK_TIME(think_time)); | |
ac1c1b7f SS |
3548 | } else { |
3549 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " | |
3550 | "TT think time or number of ports\n", | |
3551 | (unsigned int) xhci->hci_version); | |
3552 | } | |
3553 | slot_ctx->dev_state = 0; | |
3554 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3555 | ||
3556 | xhci_dbg(xhci, "Set up %s for hub device.\n", | |
3557 | (xhci->hci_version > 0x95) ? | |
3558 | "configure endpoint" : "evaluate context"); | |
3559 | xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); | |
3560 | xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); | |
3561 | ||
3562 | /* Issue and wait for the configure endpoint or | |
3563 | * evaluate context command. | |
3564 | */ | |
3565 | if (xhci->hci_version > 0x95) | |
3566 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | |
3567 | false, false); | |
3568 | else | |
3569 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | |
3570 | true, false); | |
3571 | ||
3572 | xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); | |
3573 | xhci_dbg_ctx(xhci, vdev->out_ctx, 0); | |
3574 | ||
3575 | xhci_free_command(xhci, config_cmd); | |
3576 | return ret; | |
3577 | } | |
3578 | ||
66d4eadd SS |
3579 | int xhci_get_frame(struct usb_hcd *hcd) |
3580 | { | |
3581 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
3582 | /* EHCI mods by the periodic size. Why? */ | |
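/* MFINDEX counts 125 us microframes; shifting right by 3 converts
 * that into the 1 ms frame number the USB core expects (8 microframes
 * per frame).
 */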
3583 | return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; | |
3584 | } | |
3585 | ||
3586 | MODULE_DESCRIPTION(DRIVER_DESC); | |
3587 | MODULE_AUTHOR(DRIVER_AUTHOR); | |
3588 | MODULE_LICENSE("GPL"); | |
3589 | ||
3590 | static int __init xhci_hcd_init(void) | |
3591 | { | |
3592 | #ifdef CONFIG_PCI | |
3593 | int retval = 0; | |
3594 | ||
3595 | retval = xhci_register_pci(); | |
3596 | ||
3597 | if (retval < 0) { | |
3598 | printk(KERN_DEBUG "Problem registering PCI driver.\n"); |
3599 | return retval; | |
3600 | } | |
3601 | #endif | |
98441973 SS |
3602 | /* |
3603 | * Check the compiler generated sizes of structures that must be laid | |
3604 | * out in specific ways for hardware access. | |
3605 | */ | |
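/* Each expected size below is written as
 * <number of 32-bit registers> * 32 bits / 8 bits-per-byte; e.g. the
 * doorbell array is 256 32-bit doorbells == 1024 bytes.
 */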
3606 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); | |
3607 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); | |
3608 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); | |
3609 | /* xhci_device_control has eight fields, and also | |
3610 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx | |
3611 | */ | |
98441973 SS |
3612 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
3613 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); | |
3614 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); | |
3615 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); | |
3616 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); | |
3617 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ | |
3618 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); | |
3619 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); | |
66d4eadd SS |
3620 | return 0; |
3621 | } | |
3622 | module_init(xhci_hcd_init); | |
3623 | ||
3624 | static void __exit xhci_hcd_cleanup(void) | |
3625 | { | |
3626 | #ifdef CONFIG_PCI | |
3627 | xhci_unregister_pci(); | |
3628 | #endif | |
3629 | } | |
3630 | module_exit(xhci_hcd_cleanup); |