drivers/usb/host/xhci-mem.c
1 /*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23 #include <linux/usb.h>
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 #include <linux/dmapool.h>
27
28 #include "xhci.h"
29 #include <mach/mt_boot.h>
30 #include <linux/dma-mapping.h>
31
32 /*
33 * Allocates a generic ring segment from the ring pool, sets the dma address,
34 * initializes the segment to zero, and sets the private next pointer to NULL.
35 *
36 * Section 4.11.1.1:
37 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
38 */
39 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
40 unsigned int cycle_state, gfp_t flags)
41 {
42 struct xhci_segment *seg;
43 dma_addr_t dma;
44 int i;
45
46 seg = kzalloc(sizeof *seg, flags);
47 if (!seg)
48 return NULL;
49
50 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
51 if (!seg->trbs) {
52 kfree(seg);
53 return NULL;
54 }
55
56 memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
57 /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
58 if (cycle_state == 0) {
59 for (i = 0; i < TRBS_PER_SEGMENT; i++)
60 seg->trbs[i].link.control |= TRB_CYCLE;
61 }
62 seg->dma = dma;
63 seg->next = NULL;
64
65 return seg;
66 }
67
68 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
69 {
70 if (seg->trbs) {
71 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
72 seg->trbs = NULL;
73 }
74 kfree(seg);
75 }
76
77 static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
78 struct xhci_segment *first)
79 {
80 struct xhci_segment *seg;
81
82 seg = first->next;
83 while (seg != first) {
84 struct xhci_segment *next = seg->next;
85 xhci_segment_free(xhci, seg);
86 seg = next;
87 }
88 xhci_segment_free(xhci, first);
89 }
90
91 /*
92 * Make the prev segment point to the next segment.
93 *
94 * Change the last TRB in the prev segment to be a Link TRB which points to the
95 * DMA address of the next segment. The caller needs to set any Link TRB
96 * related flags, such as End TRB, Toggle Cycle, and no snoop.
97 */
98 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
99 struct xhci_segment *next, enum xhci_ring_type type)
100 {
101 u32 val;
102
103 if (!prev || !next)
104 return;
105 prev->next = next;
106 if (type != TYPE_EVENT) {
107 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
108 cpu_to_le64(next->dma);
109
110 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
111 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
112 val &= ~TRB_TYPE_BITMASK;
113 val |= TRB_TYPE(TRB_LINK);
114 /* Always set the chain bit with 0.95 hardware */
115 /* Set chain bit for isoc rings on AMD 0.96 host */
116 #ifndef CONFIG_MTK_XHCI
117 if (xhci_link_trb_quirk(xhci) ||
118 (type == TYPE_ISOC &&
119 (xhci->quirks & XHCI_AMD_0x96_HOST)))
120 val |= TRB_CHAIN;
121 #endif
122 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
123 }
124 }
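
/*
 * For illustration: the control word written above can be decoded with
 * TRB_FIELD_TO_TYPE(), the inverse of TRB_TYPE().  A helper along these
 * lines (the name here is ours, not one this driver defines) tests
 * whether a TRB has been turned into a Link TRB:
 */
static inline bool xhci_trb_is_link_trb(union xhci_trb *trb)
{
	return TRB_FIELD_TO_TYPE(le32_to_cpu(trb->link.control)) == TRB_LINK;
}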
125
126 /*
127 * Link the ring to the new segments.
128 * Set Toggle Cycle for the new ring if needed.
129 */
130 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
131 struct xhci_segment *first, struct xhci_segment *last,
132 unsigned int num_segs)
133 {
134 struct xhci_segment *next;
135
136 if (!ring || !first || !last)
137 return;
138
139 next = ring->enq_seg->next;
140 xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
141 xhci_link_segments(xhci, last, next, ring->type);
142 ring->num_segs += num_segs;
143 ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
144
145 if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
146 ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
147 &= ~cpu_to_le32(LINK_TOGGLE);
148 last->trbs[TRBS_PER_SEGMENT-1].link.control
149 |= cpu_to_le32(LINK_TOGGLE);
150 ring->last_seg = last;
151 }
152 }
153
154 /* XXX: Do we need the hcd structure in all these functions? */
155 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
156 {
157 if (!ring)
158 return;
159
160 if (ring->first_seg)
161 xhci_free_segments_for_ring(xhci, ring->first_seg);
162
163 kfree(ring);
164 }
165
166 static void xhci_initialize_ring_info(struct xhci_ring *ring,
167 unsigned int cycle_state)
168 {
169 /* The ring is empty, so the enqueue pointer == dequeue pointer */
170 ring->enqueue = ring->first_seg->trbs;
171 ring->enq_seg = ring->first_seg;
172 ring->dequeue = ring->enqueue;
173 ring->deq_seg = ring->first_seg;
174 /* The ring is initialized to 0. The producer must write 1 to the cycle
175 * bit to hand over ownership of the TRB, so PCS = 1. The consumer must
176 * compare CCS to the cycle bit to check ownership, so CCS = 1.
177 *
178 * New rings are initialized with cycle state equal to 1; if we are
179 * handling ring expansion, set the cycle state equal to the old ring.
180 */
181 ring->cycle_state = cycle_state;
182 /* Not necessary for new rings, but needed for re-initialized rings */
183 ring->enq_updates = 0;
184 ring->deq_updates = 0;
185
186 /*
187 * Each segment has a link TRB, and we leave one extra TRB for
188 * SW accounting purposes
189 */
190 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
191 }
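
/*
 * Worked example for the line above, assuming this tree's
 * TRBS_PER_SEGMENT of 64: a fresh two-segment ring reports
 * 2 * (64 - 1) - 1 = 125 free TRBs.  One TRB per segment is consumed by
 * its link TRB, and one more is held back so software can tell a
 * completely full ring apart from an empty one.
 */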
192
193 /* Allocate segments and link them for a ring */
194 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
195 struct xhci_segment **first, struct xhci_segment **last,
196 unsigned int num_segs, unsigned int cycle_state,
197 enum xhci_ring_type type, gfp_t flags)
198 {
199 struct xhci_segment *prev;
200
201 prev = xhci_segment_alloc(xhci, cycle_state, flags);
202 if (!prev)
203 return -ENOMEM;
204 num_segs--;
205
206 *first = prev;
207 while (num_segs > 0) {
208 struct xhci_segment *next;
209
210 next = xhci_segment_alloc(xhci, cycle_state, flags);
211 if (!next) {
212 prev = *first;
213 while (prev) {
214 next = prev->next;
215 xhci_segment_free(xhci, prev);
216 prev = next;
217 }
218 return -ENOMEM;
219 }
220 xhci_link_segments(xhci, prev, next, type);
221
222 prev = next;
223 num_segs--;
224 }
225 xhci_link_segments(xhci, prev, *first, type);
226 *last = prev;
227
228 return 0;
229 }
230
231 /**
232 * Create a new ring with zero or more segments.
233 *
234 * Link each segment together into a ring.
235 * Set the end flag and the cycle toggle bit on the last segment.
236 * See section 4.9.1 and figures 15 and 16.
237 */
238 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
239 unsigned int num_segs, unsigned int cycle_state,
240 enum xhci_ring_type type, gfp_t flags)
241 {
242 struct xhci_ring *ring;
243 int ret;
244
245 ring = kzalloc(sizeof *(ring), flags);
246 if (!ring)
247 return NULL;
248
249 ring->num_segs = num_segs;
250 INIT_LIST_HEAD(&ring->td_list);
251 ring->type = type;
252 if (num_segs == 0)
253 return ring;
254
255 ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
256 &ring->last_seg, num_segs, cycle_state, type, flags);
257 if (ret)
258 goto fail;
259
260 /* Only event ring does not use link TRB */
261 if (type != TYPE_EVENT) {
262 /* See section 4.9.2.1 and 6.4.4.1 */
263 ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
264 cpu_to_le32(LINK_TOGGLE);
265 }
266 xhci_initialize_ring_info(ring, cycle_state);
267 return ring;
268
269 fail:
270 kfree(ring);
271 return NULL;
272 }
273
274 void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
275 struct xhci_virt_device *virt_dev,
276 unsigned int ep_index)
277 {
278 int rings_cached;
279
280 rings_cached = virt_dev->num_rings_cached;
281 if (rings_cached < XHCI_MAX_RINGS_CACHED) {
282 virt_dev->ring_cache[rings_cached] =
283 virt_dev->eps[ep_index].ring;
284 virt_dev->num_rings_cached++;
285 xhci_dbg(xhci, "Cached old ring, "
286 "%d ring%s cached\n",
287 virt_dev->num_rings_cached,
288 (virt_dev->num_rings_cached > 1) ? "s" : "");
289 } else {
290 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
291 xhci_dbg(xhci, "Ring cache full (%d rings), "
292 "freeing ring\n",
293 virt_dev->num_rings_cached);
294 }
295 virt_dev->eps[ep_index].ring = NULL;
296 }
297
298 /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
299 * pointers to the beginning of the ring.
300 */
301 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
302 struct xhci_ring *ring, unsigned int cycle_state,
303 enum xhci_ring_type type)
304 {
305 struct xhci_segment *seg = ring->first_seg;
306 int i;
307
308 do {
309 memset(seg->trbs, 0,
310 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
311 if (cycle_state == 0) {
312 for (i = 0; i < TRBS_PER_SEGMENT; i++)
313 seg->trbs[i].link.control |= TRB_CYCLE;
314 }
315 /* All endpoint rings have link TRBs */
316 xhci_link_segments(xhci, seg, seg->next, type);
317 seg = seg->next;
318 } while (seg != ring->first_seg);
319 ring->type = type;
320 xhci_initialize_ring_info(ring, cycle_state);
321 /* td list should be empty since all URBs have been cancelled,
322 * but just in case...
323 */
324 INIT_LIST_HEAD(&ring->td_list);
325 }
326
327 /*
328 * Expand an existing ring.
329 * Allocate new segments (enough for at least num_trbs more TRBs, or as
330 * many segments as the ring already has) and link them into the ring.
331 */
332 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
333 unsigned int num_trbs, gfp_t flags)
334 {
335 struct xhci_segment *first;
336 struct xhci_segment *last;
337 unsigned int num_segs;
338 unsigned int num_segs_needed;
339 int ret;
340
341 num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
342 (TRBS_PER_SEGMENT - 1);
343
344 /* Allocate the number of segments we need, or double the ring size */
345 num_segs = ring->num_segs > num_segs_needed ?
346 ring->num_segs : num_segs_needed;
347
348 ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
349 num_segs, ring->cycle_state, ring->type, flags);
350 if (ret)
351 return -ENOMEM;
352
353 xhci_link_rings(xhci, ring, first, last, num_segs);
354 xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
355 ring->num_segs);
356
357 return 0;
358 }
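
/*
 * Sizing example, again assuming TRBS_PER_SEGMENT == 64: asking for
 * room for 300 more TRBs needs ceil(300 / 63) = 5 segments, since each
 * segment contributes 63 usable TRBs once its link TRB is taken out.
 * If the ring already holds 8 segments, the larger value wins and 8 new
 * segments are allocated instead, doubling the ring.
 */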
359
360 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
361
362 static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
363 int type, gfp_t flags)
364 {
365 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
366 if (!ctx)
367 return NULL;
368
369 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
370 ctx->type = type;
371 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
372 if (type == XHCI_CTX_TYPE_INPUT)
373 ctx->size += CTX_SIZE(xhci->hcc_params);
374
375 ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
376 if (!ctx->bytes) {
377 kfree(ctx);
378 return NULL;
379 }
380 memset(ctx->bytes, 0, ctx->size);
381 return ctx;
382 }
383
384 static void xhci_free_container_ctx(struct xhci_hcd *xhci,
385 struct xhci_container_ctx *ctx)
386 {
387 if (!ctx)
388 return;
389 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
390 kfree(ctx);
391 }
392
393 struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
394 struct xhci_container_ctx *ctx)
395 {
396 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
397 return (struct xhci_input_control_ctx *)ctx->bytes;
398 }
399
400 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
401 struct xhci_container_ctx *ctx)
402 {
403 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
404 return (struct xhci_slot_ctx *)ctx->bytes;
405
406 return (struct xhci_slot_ctx *)
407 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
408 }
409
410 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
411 struct xhci_container_ctx *ctx,
412 unsigned int ep_index)
413 {
414 /* increment ep index by offset of start of ep ctx array */
415 ep_index++;
416 if (ctx->type == XHCI_CTX_TYPE_INPUT)
417 ep_index++;
418
419 return (struct xhci_ep_ctx *)
420 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
421 }
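
/*
 * Layout sketch behind the pointer math above, assuming 32-byte
 * contexts (HCC_64BYTE_CONTEXT() false).  A device context is
 * { slot, ep0, ep1 OUT, ep1 IN, ... }, so xhci_get_ep_ctx() with
 * ep_index 0 lands at bytes + 32.  An input context prepends the input
 * control context, shifting everything by one more CTX_SIZE: slot at
 * +32, ep0 at +64, and so on.  With 64-byte contexts every offset
 * doubles.
 */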
422
423
424 /***************** Streams structures manipulation *************************/
425
426 static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
427 unsigned int num_stream_ctxs,
428 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
429 {
430 struct device *dev = xhci_to_hcd(xhci)->self.controller;
431
432 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
433 dma_free_coherent(dev,
434 sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
438 stream_ctx, dma);
440 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
441 return dma_pool_free(xhci->small_streams_pool,
442 stream_ctx, dma);
443 else
444 return dma_pool_free(xhci->medium_streams_pool,
445 stream_ctx, dma);
446 }
447
448 /*
449 * The stream context array for each endpoint with bulk streams enabled can
450 * vary in size, based on:
451 * - how many streams the endpoint supports,
452 * - the maximum primary stream array size the host controller supports,
453 * - and how many streams the device driver asks for.
454 *
455 * The stream context array must be a power of 2, and can be as small as
456 * 64 bytes or as large as 1MB.
457 */
458 static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
459 unsigned int num_stream_ctxs, dma_addr_t *dma,
460 gfp_t mem_flags)
461 {
462 struct device *dev = xhci_to_hcd(xhci)->self.controller;
463
464 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
465 return dma_alloc_coherent(dev,
466 sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
467 dma, mem_flags);
468 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
469 return dma_pool_alloc(xhci->small_streams_pool,
470 mem_flags, dma);
471 else
472 return dma_pool_alloc(xhci->medium_streams_pool,
473 mem_flags, dma);
474 }
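
/*
 * Tier selection in the allocator above: arrays bigger than
 * MEDIUM_STREAM_ARRAY_SIZE get a one-off dma_alloc_coherent() buffer,
 * arrays no bigger than SMALL_STREAM_ARRAY_SIZE come from the small
 * dma_pool, and everything in between comes from the medium dma_pool.
 * The pools presumably exist because stream arrays come and go often
 * enough that recycling fixed-size coherent buffers beats mapping a
 * fresh one every time.
 */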
475
476 struct xhci_ring *xhci_dma_to_transfer_ring(
477 struct xhci_virt_ep *ep,
478 u64 address)
479 {
480 if (ep->ep_state & EP_HAS_STREAMS)
481 return radix_tree_lookup(&ep->stream_info->trb_address_map,
482 address >> TRB_SEGMENT_SHIFT);
483 return ep->ring;
484 }
485
486 /* Only use this when you know stream_info is valid */
487 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
488 static struct xhci_ring *dma_to_stream_ring(
489 struct xhci_stream_info *stream_info,
490 u64 address)
491 {
492 return radix_tree_lookup(&stream_info->trb_address_map,
493 address >> TRB_SEGMENT_SHIFT);
494 }
495 #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
496
497 struct xhci_ring *xhci_stream_id_to_ring(
498 struct xhci_virt_device *dev,
499 unsigned int ep_index,
500 unsigned int stream_id)
501 {
502 struct xhci_virt_ep *ep = &dev->eps[ep_index];
503
504 if (stream_id == 0)
505 return ep->ring;
506 if (!ep->stream_info)
507 return NULL;
508
509 if (stream_id > ep->stream_info->num_streams)
510 return NULL;
511 return ep->stream_info->stream_rings[stream_id];
512 }
513
514 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
515 static int xhci_test_radix_tree(struct xhci_hcd *xhci,
516 unsigned int num_streams,
517 struct xhci_stream_info *stream_info)
518 {
519 u32 cur_stream;
520 struct xhci_ring *cur_ring;
521 u64 addr;
522
523 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
524 struct xhci_ring *mapped_ring;
525 int trb_size = sizeof(union xhci_trb);
526
527 cur_ring = stream_info->stream_rings[cur_stream];
528 for (addr = cur_ring->first_seg->dma;
529 addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE;
530 addr += trb_size) {
531 mapped_ring = dma_to_stream_ring(stream_info, addr);
532 if (cur_ring != mapped_ring) {
533 xhci_warn(xhci, "WARN: DMA address 0x%08llx "
534 "didn't map to stream ID %u; "
535 "mapped to ring %p\n",
536 (unsigned long long) addr,
537 cur_stream,
538 mapped_ring);
539 return -EINVAL;
540 }
541 }
542 /* One TRB after the end of the ring segment shouldn't return a
543 * pointer to the current ring (although it may be a part of a
544 * different ring).
545 */
546 mapped_ring = dma_to_stream_ring(stream_info, addr);
547 if (mapped_ring != cur_ring) {
548 /* One TRB before should also fail */
549 addr = cur_ring->first_seg->dma - trb_size;
550 mapped_ring = dma_to_stream_ring(stream_info, addr);
551 }
552 if (mapped_ring == cur_ring) {
553 xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
554 "mapped to valid stream ID %u; "
555 "mapped ring = %p\n",
556 (unsigned long long) addr,
557 cur_stream,
558 mapped_ring);
559 return -EINVAL;
560 }
561 }
562 return 0;
563 }
564 #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
565
566 /*
567 * Change an endpoint's internal structure so it supports stream IDs. The
568 * number of requested streams includes stream 0, which cannot be used by device
569 * drivers.
570 *
571 * The number of stream contexts in the stream context array may be bigger than
572 * the number of streams the driver wants to use. This is because the number of
573 * stream context array entries must be a power of two.
574 *
575 * We need a radix tree for mapping physical addresses of TRBs to which stream
576 * ID they belong to. We need to do this because the host controller won't tell
577 * us which stream ring the TRB came from. We could store the stream ID in an
578 * event data TRB, but that doesn't help us for the cancellation case, since the
579 * endpoint may stop before it reaches that event data TRB.
580 *
581 * The radix tree maps the upper portion of the TRB DMA address to a ring
582 * segment that has the same upper portion of DMA addresses. For example, say I
583 * have segments of size 1KB, that are always 64-byte aligned. A segment may
584 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
585 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
586 * pass the radix tree a key to get the right stream ID:
587 *
588 * 0x10c90fff >> 10 = 0x43243
589 * 0x10c912c0 >> 10 = 0x43244
590 * 0x10c91400 >> 10 = 0x43245
591 *
592 * Obviously, only those TRBs with DMA addresses that are within the segment
593 * will make the radix tree return the stream ID for that ring.
594 *
595 * Caveats for the radix tree:
596 *
597 * The radix tree uses an unsigned long as a key. On 32-bit systems, an
598 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
599 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
600 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
601 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
602 * extended systems (where the DMA address can be bigger than 32-bits),
603 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
604 */
605 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
606 unsigned int num_stream_ctxs,
607 unsigned int num_streams, gfp_t mem_flags)
608 {
609 struct xhci_stream_info *stream_info;
610 u32 cur_stream;
611 struct xhci_ring *cur_ring;
612 unsigned long key;
613 u64 addr;
614 int ret;
615
616 xhci_dbg(xhci, "Allocating %u streams and %u "
617 "stream context array entries.\n",
618 num_streams, num_stream_ctxs);
619 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
620 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
621 return NULL;
622 }
623 xhci->cmd_ring_reserved_trbs++;
624
625 stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
626 if (!stream_info)
627 goto cleanup_trbs;
628
629 stream_info->num_streams = num_streams;
630 stream_info->num_stream_ctxs = num_stream_ctxs;
631
632 /* Initialize the array of virtual pointers to stream rings. */
633 stream_info->stream_rings = kzalloc(
634 sizeof(struct xhci_ring *)*num_streams,
635 mem_flags);
636 if (!stream_info->stream_rings)
637 goto cleanup_info;
638
639 /* Initialize the array of DMA addresses for stream rings for the HW. */
640 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
641 num_stream_ctxs, &stream_info->ctx_array_dma,
642 mem_flags);
643 if (!stream_info->stream_ctx_array)
644 goto cleanup_ctx;
645 memset(stream_info->stream_ctx_array, 0,
646 sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
647
648 /* Allocate everything needed to free the stream rings later */
649 stream_info->free_streams_command =
650 xhci_alloc_command(xhci, true, true, mem_flags);
651 if (!stream_info->free_streams_command)
652 goto cleanup_ctx;
653
654 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
655
656 /* Allocate rings for all the streams that the driver will use,
657 * and add their segment DMA addresses to the radix tree.
658 * Stream 0 is reserved.
659 */
660 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
661 stream_info->stream_rings[cur_stream] =
662 xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
663 cur_ring = stream_info->stream_rings[cur_stream];
664 if (!cur_ring)
665 goto cleanup_rings;
666 cur_ring->stream_id = cur_stream;
667 /* Set deq ptr, cycle bit, and stream context type */
668 addr = cur_ring->first_seg->dma |
669 SCT_FOR_CTX(SCT_PRI_TR) |
670 cur_ring->cycle_state;
671 stream_info->stream_ctx_array[cur_stream].stream_ring =
672 cpu_to_le64(addr);
673 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
674 cur_stream, (unsigned long long) addr);
675
676 key = (unsigned long)
677 (cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
678 ret = radix_tree_insert(&stream_info->trb_address_map,
679 key, cur_ring);
680 if (ret) {
681 xhci_ring_free(xhci, cur_ring);
682 stream_info->stream_rings[cur_stream] = NULL;
683 goto cleanup_rings;
684 }
685 }
686 /* Leave the other unused stream ring pointers in the stream context
687 * array initialized to zero. This will cause the xHC to give us an
688 * error if the device asks for a stream ID we haven't set up (if it
689 * was any other way, the host controller would assume the ring is
690 * "empty" and wait forever for data to be queued to that stream ID).
691 */
692 #if XHCI_DEBUG
693 /* Do a little test on the radix tree to make sure it returns the
694 * correct values.
695 */
696 if (xhci_test_radix_tree(xhci, num_streams, stream_info))
697 goto cleanup_rings;
698 #endif
699
700 return stream_info;
701
702 cleanup_rings:
703 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
704 cur_ring = stream_info->stream_rings[cur_stream];
705 if (cur_ring) {
706 addr = cur_ring->first_seg->dma;
707 radix_tree_delete(&stream_info->trb_address_map,
708 addr >> TRB_SEGMENT_SHIFT);
709 xhci_ring_free(xhci, cur_ring);
710 stream_info->stream_rings[cur_stream] = NULL;
711 }
712 }
713 xhci_free_command(xhci, stream_info->free_streams_command);
714 cleanup_ctx:
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci, num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);
715 kfree(stream_info->stream_rings);
716 cleanup_info:
717 kfree(stream_info);
718 cleanup_trbs:
719 xhci->cmd_ring_reserved_trbs--;
720 return NULL;
721 }
722 /*
723 * Sets the MaxPStreams field and the Linear Stream Array field.
724 * Sets the dequeue pointer to the stream context array.
725 */
726 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
727 struct xhci_ep_ctx *ep_ctx,
728 struct xhci_stream_info *stream_info)
729 {
730 u32 max_primary_streams;
731 /* MaxPStreams is the number of stream context array entries, not the
732 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
733 * fls(0) = 0, fls(1) = 1, fls(2) = 2, fls(4) = 3, etc.
734 */
735 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
736 xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
737 1 << (max_primary_streams + 1));
738 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
739 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
740 | EP_HAS_LSA);
741 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
742 }
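
/*
 * Worked example of the encoding above: for num_stream_ctxs = 256,
 * fls(256) = 9, so MaxPStreams = 9 - 2 = 7 and the host decodes that
 * back as 2^(7 + 1) = 256 stream context entries.  The smallest legal
 * array of 4 entries encodes as fls(4) - 2 = 1.
 */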
743
744 /*
745 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
746 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
747 * not at the beginning of the ring).
748 */
749 void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
750 struct xhci_ep_ctx *ep_ctx,
751 struct xhci_virt_ep *ep)
752 {
753 dma_addr_t addr;
754 ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
755 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
756 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
757 }
758
759 /* Frees all stream contexts associated with the endpoint.
760 *
761 * Caller should fix the endpoint context streams fields.
762 */
763 void xhci_free_stream_info(struct xhci_hcd *xhci,
764 struct xhci_stream_info *stream_info)
765 {
766 int cur_stream;
767 struct xhci_ring *cur_ring;
768 dma_addr_t addr;
769
770 if (!stream_info)
771 return;
772
773 for (cur_stream = 1; cur_stream < stream_info->num_streams;
774 cur_stream++) {
775 cur_ring = stream_info->stream_rings[cur_stream];
776 if (cur_ring) {
777 addr = cur_ring->first_seg->dma;
778 radix_tree_delete(&stream_info->trb_address_map,
779 addr >> TRB_SEGMENT_SHIFT);
780 xhci_ring_free(xhci, cur_ring);
781 stream_info->stream_rings[cur_stream] = NULL;
782 }
783 }
784 xhci_free_command(xhci, stream_info->free_streams_command);
785 xhci->cmd_ring_reserved_trbs--;
786 if (stream_info->stream_ctx_array)
787 xhci_free_stream_ctx(xhci,
788 stream_info->num_stream_ctxs,
789 stream_info->stream_ctx_array,
790 stream_info->ctx_array_dma);
791
792 kfree(stream_info->stream_rings);
794 kfree(stream_info);
795 }
796
797
798 /***************** Device context manipulation *************************/
799
800 static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
801 struct xhci_virt_ep *ep)
802 {
803 init_timer(&ep->stop_cmd_timer);
804 ep->stop_cmd_timer.data = (unsigned long) ep;
805 ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
806 ep->xhci = xhci;
807 }
808
809 static void xhci_free_tt_info(struct xhci_hcd *xhci,
810 struct xhci_virt_device *virt_dev,
811 int slot_id)
812 {
813 struct list_head *tt_list_head;
814 struct xhci_tt_bw_info *tt_info, *next;
815 bool slot_found = false;
816
817 /* If the device never made it past the Set Address stage,
818 * it may not have the real_port set correctly.
819 */
820 if (virt_dev->real_port == 0 ||
821 virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
822 xhci_dbg(xhci, "Bad real port.\n");
823 return;
824 }
825
826 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
827 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
828 /* Multi-TT hubs will have more than one entry */
829 if (tt_info->slot_id == slot_id) {
830 slot_found = true;
831 list_del(&tt_info->tt_list);
832 kfree(tt_info);
833 } else if (slot_found) {
834 break;
835 }
836 }
837 }
838
839 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
840 struct xhci_virt_device *virt_dev,
841 struct usb_device *hdev,
842 struct usb_tt *tt, gfp_t mem_flags)
843 {
844 struct xhci_tt_bw_info *tt_info;
845 unsigned int num_ports;
846 int i, j;
847
848 if (!tt->multi)
849 num_ports = 1;
850 else
851 num_ports = hdev->maxchild;
852
853 for (i = 0; i < num_ports; i++, tt_info++) {
854 struct xhci_interval_bw_table *bw_table;
855
856 tt_info = kzalloc(sizeof(*tt_info), mem_flags);
857 if (!tt_info)
858 goto free_tts;
859 INIT_LIST_HEAD(&tt_info->tt_list);
860 list_add(&tt_info->tt_list,
861 &xhci->rh_bw[virt_dev->real_port - 1].tts);
862 tt_info->slot_id = virt_dev->udev->slot_id;
863 if (tt->multi)
864 tt_info->ttport = i+1;
865 bw_table = &tt_info->bw_table;
866 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
867 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
868 }
869 return 0;
870
871 free_tts:
872 xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
873 return -ENOMEM;
874 }
875
876
877 /* All the xhci_tds in the ring's TD list should be freed at this point.
878 * Should be called with xhci->lock held if there is any chance the TT lists
879 * will be manipulated by the configure endpoint, allocate device, or update
880 * hub functions while this function is removing the TT entries from the list.
881 */
882 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
883 {
884 struct xhci_virt_device *dev;
885 int i;
886 int old_active_eps = 0;
887
888 /* Slot ID 0 is reserved */
889 if (slot_id == 0 || !xhci->devs[slot_id])
890 return;
891
892 dev = xhci->devs[slot_id];
893 xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
894 if (!dev)
895 return;
896
897 if (dev->tt_info)
898 old_active_eps = dev->tt_info->active_eps;
899
900 for (i = 0; i < 31; ++i) {
901 if (dev->eps[i].ring)
902 xhci_ring_free(xhci, dev->eps[i].ring);
903 if (dev->eps[i].stream_info)
904 xhci_free_stream_info(xhci,
905 dev->eps[i].stream_info);
906 /* Endpoints on the TT/root port lists should have been removed
907 * when usb_disable_device() was called for the device.
908 * We can't drop them anyway, because the udev might have gone
909 * away by this point, and we can't tell what speed it was.
910 */
911 if (!list_empty(&dev->eps[i].bw_endpoint_list))
912 xhci_warn(xhci, "Slot %u endpoint %u "
913 "not removed from BW list!\n",
914 slot_id, i);
915 }
916 /* If this is a hub, free the TT(s) from the TT list */
917 xhci_free_tt_info(xhci, dev, slot_id);
918 /* If necessary, update the number of active TTs on this root port */
919 xhci_update_tt_active_eps(xhci, dev, old_active_eps);
920
921 if (dev->ring_cache) {
922 for (i = 0; i < dev->num_rings_cached; i++)
923 xhci_ring_free(xhci, dev->ring_cache[i]);
924 kfree(dev->ring_cache);
925 }
926
927 if (dev->in_ctx)
928 xhci_free_container_ctx(xhci, dev->in_ctx);
929 if (dev->out_ctx)
930 xhci_free_container_ctx(xhci, dev->out_ctx);
931
932 kfree(xhci->devs[slot_id]);
933 xhci->devs[slot_id] = NULL;
934 }
935
936 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
937 struct usb_device *udev, gfp_t flags)
938 {
939 struct xhci_virt_device *dev;
940 int i;
941
942 /* Slot ID 0 is reserved */
943 if (slot_id == 0 || xhci->devs[slot_id]) {
944 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
945 return 0;
946 }
947
948 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
949 if (!xhci->devs[slot_id])
950 return 0;
951 dev = xhci->devs[slot_id];
952
953 /* Allocate the (output) device context that will be used in the HC. */
954 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
955 if (!dev->out_ctx)
956 goto fail;
957
958 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
959 (unsigned long long)dev->out_ctx->dma);
960
961 /* Allocate the (input) device context for address device command */
962 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
963 if (!dev->in_ctx)
964 goto fail;
965
966 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
967 (unsigned long long)dev->in_ctx->dma);
968
969 /* Initialize the cancellation list and watchdog timers for each ep */
970 for (i = 0; i < 31; i++) {
971 xhci_init_endpoint_timer(xhci, &dev->eps[i]);
972 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
973 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
974 }
975
976 /* Allocate endpoint 0 ring */
977 dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
978 if (!dev->eps[0].ring)
979 goto fail;
980
981 /* Allocate pointers to the ring cache */
982 dev->ring_cache = kzalloc(
983 sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
984 flags);
985 if (!dev->ring_cache)
986 goto fail;
987 dev->num_rings_cached = 0;
988
989 init_completion(&dev->cmd_completion);
990 INIT_LIST_HEAD(&dev->cmd_list);
991 dev->udev = udev;
992
993 /* Point to output device context in dcbaa. */
994 xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
995 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
996 slot_id,
997 &xhci->dcbaa->dev_context_ptrs[slot_id],
998 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
999
1000 return 1;
1001 fail:
1002 xhci_free_virt_device(xhci, slot_id);
1003 return 0;
1004 }
1005
1006 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1007 struct usb_device *udev)
1008 {
1009 struct xhci_virt_device *virt_dev;
1010 struct xhci_ep_ctx *ep0_ctx;
1011 struct xhci_ring *ep_ring;
1012
1013 virt_dev = xhci->devs[udev->slot_id];
1014 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1015 ep_ring = virt_dev->eps[0].ring;
1016 /*
1017 * FIXME we don't keep track of the dequeue pointer very well after a
1018 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1019 * host to our enqueue pointer. This should only be called after a
1020 * configured device has reset, so all control transfers should have
1021 * been completed or cancelled before the reset.
1022 */
1023 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1024 ep_ring->enqueue)
1025 | ep_ring->cycle_state);
1026 }
1027
1028 /*
1029 * The xHCI roothub may have ports of differing speeds in any order in the port
1030 * status registers. xhci->port_array provides an array of the port speed for
1031 * each offset into the port status registers.
1032 *
1033 * The xHCI hardware wants to know the roothub port number that the USB device
1034 * is attached to (or the roothub port its ancestor hub is attached to). All we
1035 * know is the index of that port under either the USB 2.0 or the USB 3.0
1036 * roothub, but that doesn't give us the real index into the HW port status
1037 * registers. Call xhci_find_raw_port_number() to get real index.
1038 */
1039 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1040 struct usb_device *udev)
1041 {
1042 struct usb_device *top_dev;
1043 struct usb_hcd *hcd;
1044
1045 if (udev->speed == USB_SPEED_SUPER)
1046 hcd = xhci->shared_hcd;
1047 else
1048 hcd = xhci->main_hcd;
1049
1050 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1051 top_dev = top_dev->parent)
1052 /* Found device below root hub */;
1053
1054 return xhci_find_raw_port_number(hcd, top_dev->portnum);
1055 }
1056
1057 /* Setup an xHCI virtual device for a Set Address command */
1058 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1059 {
1060 struct xhci_virt_device *dev;
1061 struct xhci_ep_ctx *ep0_ctx;
1062 struct xhci_slot_ctx *slot_ctx;
1063 u32 port_num;
1064 struct usb_device *top_dev;
1065
1066 dev = xhci->devs[udev->slot_id];
1067 /* Slot ID 0 is reserved */
1068 if (udev->slot_id == 0 || !dev) {
1069 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1070 udev->slot_id);
1071 return -EINVAL;
1072 }
1073 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1074 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1075
1076 /* 3) Only the control endpoint is valid - one endpoint context */
1077 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1078 switch (udev->speed) {
1079 case USB_SPEED_SUPER:
1080 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1081 break;
1082 case USB_SPEED_HIGH:
1083 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1084 break;
1085 case USB_SPEED_FULL:
1086 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1087 break;
1088 case USB_SPEED_LOW:
1089 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1090 break;
1091 case USB_SPEED_WIRELESS:
1092 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1093 return -EINVAL;
1095 default:
1096 /* Speed was set earlier, this shouldn't happen. */
1097 BUG();
1098 }
1099 /* Find the root hub port this device is under */
1100 port_num = xhci_find_real_port_number(xhci, udev);
1101 if (!port_num)
1102 return -EINVAL;
1103 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1104 /* Set the port number in the virtual device to the fake port number */
1105 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1106 top_dev = top_dev->parent)
1107 /* Found device below root hub */;
1108 dev->fake_port = top_dev->portnum;
1109 dev->real_port = port_num;
1110 xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1111 xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1112
1113 /* Find the right bandwidth table that this device will be a part of.
1114 * If this is a full speed device attached directly to a root port (or a
1115 * descendant of one), it counts as a primary bandwidth domain, not a
1116 * secondary bandwidth domain under a TT. An xhci_tt_info structure
1117 * will never be created for the HS root hub.
1118 */
1119 if (!udev->tt || !udev->tt->hub->parent) {
1120 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1121 } else {
1122 struct xhci_root_port_bw_info *rh_bw;
1123 struct xhci_tt_bw_info *tt_bw;
1124
1125 rh_bw = &xhci->rh_bw[port_num - 1];
1126 /* Find the right TT. */
1127 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1128 if (tt_bw->slot_id != udev->tt->hub->slot_id)
1129 continue;
1130
1131 if (!dev->udev->tt->multi ||
1132 (udev->tt->multi &&
1133 tt_bw->ttport == dev->udev->ttport)) {
1134 dev->bw_table = &tt_bw->bw_table;
1135 dev->tt_info = tt_bw;
1136 break;
1137 }
1138 }
1139 if (!dev->tt_info)
1140 xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1141 }
1142
1143 /* Is this a LS/FS device under an external HS hub? */
1144 if (udev->tt && udev->tt->hub->parent) {
1145 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1146 (udev->ttport << 8));
1147 if (udev->tt->multi)
1148 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1149 }
1150 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1151 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1152
1153 /* Step 4 - ring already allocated */
1154 /* Step 5 */
1155 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1156 /*
1157 * XXX: Not sure about wireless USB devices.
1158 */
1159 switch (udev->speed) {
1160 case USB_SPEED_SUPER:
1161 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
1162 break;
1163 case USB_SPEED_HIGH:
1164 /* USB core guesses at a 64-byte max packet first for FS devices */
1165 case USB_SPEED_FULL:
1166 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
1167 break;
1168 case USB_SPEED_LOW:
1169 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
1170 break;
1171 case USB_SPEED_WIRELESS:
1172 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1173 return -EINVAL;
1175 default:
1176 /* New speed? */
1177 BUG();
1178 }
1179 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
1180 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
1181
1182 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1183 dev->eps[0].ring->cycle_state);
1184
1185 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1186
1187 return 0;
1188 }
1189
1190 /*
1191 * Convert interval expressed as 2^(bInterval - 1) == interval into
1192 * straight exponent value 2^n == interval.
1193 *
1194 */
1195 static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1196 struct usb_host_endpoint *ep)
1197 {
1198 unsigned int interval;
1199
1200 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1201 if (interval != ep->desc.bInterval - 1)
1202 dev_warn(&udev->dev,
1203 "ep %#x - rounding interval to %d %sframes\n",
1204 ep->desc.bEndpointAddress,
1205 1 << interval,
1206 udev->speed == USB_SPEED_FULL ? "" : "micro");
1207
1208 if (udev->speed == USB_SPEED_FULL) {
1209 /*
1210 * Full speed isoc endpoints specify interval in frames,
1211 * not microframes. We are using microframes everywhere,
1212 * so adjust accordingly.
1213 */
1214 interval += 3; /* 1 frame = 2^3 uframes */
1215 }
1216
1217 return interval;
1218 }
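
/*
 * Example: a full-speed isoc endpoint with bInterval = 4 is serviced
 * every 2^(4 - 1) = 8 frames.  The code above computes
 * interval = 4 - 1 = 3, then adds 3 to convert frames to microframes,
 * giving 2^6 * 125us = 8ms -- the same period, in xHCI's units.
 */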
1219
1220 /*
1221 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1222 * microframes, rounded down to nearest power of 2.
1223 */
1224 static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1225 struct usb_host_endpoint *ep, unsigned int desc_interval,
1226 unsigned int min_exponent, unsigned int max_exponent)
1227 {
1228 unsigned int interval;
1229
1230 interval = fls(desc_interval) - 1;
1231 interval = clamp_val(interval, min_exponent, max_exponent);
1232 if ((1 << interval) != desc_interval)
1233 dev_warn(&udev->dev,
1234 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1235 ep->desc.bEndpointAddress,
1236 1 << interval,
1237 desc_interval);
1238
1239 return interval;
1240 }
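
/*
 * Example of the round-down above: a high-speed bulk endpoint asking
 * for a NAK rate of 9 microframes is not a power of two, so
 * fls(9) - 1 = 3 and the interval becomes 2^3 = 8 microframes, with the
 * dev_warn() noting the adjustment from 9.
 */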
1241
1242 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1243 struct usb_host_endpoint *ep)
1244 {
1245 if (ep->desc.bInterval == 0)
1246 return 0;
1247 return xhci_microframes_to_exponent(udev, ep,
1248 ep->desc.bInterval, 0, 15);
1249 }
1250
1251
1252 static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1253 struct usb_host_endpoint *ep)
1254 {
1255 return xhci_microframes_to_exponent(udev, ep,
1256 ep->desc.bInterval * 8, 3, 10);
1257 }
1258
1259 /* Return the polling or NAK interval.
1260 *
1261 * The polling interval is expressed in "microframes". If xHCI's Interval field
1262 * is set to N, it will service the endpoint every 2^(Interval)*125us.
1263 *
1264 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
1265 * is set to 0.
1266 */
1267 static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1268 struct usb_host_endpoint *ep)
1269 {
1270 unsigned int interval = 0;
1271
1272 switch (udev->speed) {
1273 case USB_SPEED_HIGH:
1274 /* Max NAK rate */
1275 if (usb_endpoint_xfer_control(&ep->desc) ||
1276 usb_endpoint_xfer_bulk(&ep->desc)) {
1277 interval = xhci_parse_microframe_interval(udev, ep);
1278 break;
1279 }
1280 /* Fall through - SS and HS isoc/int have same decoding */
1281
1282 case USB_SPEED_SUPER:
1283 if (usb_endpoint_xfer_int(&ep->desc) ||
1284 usb_endpoint_xfer_isoc(&ep->desc)) {
1285 interval = xhci_parse_exponent_interval(udev, ep);
1286 }
1287 break;
1288
1289 case USB_SPEED_FULL:
1290 if (usb_endpoint_xfer_isoc(&ep->desc)) {
1291 interval = xhci_parse_exponent_interval(udev, ep);
1292 break;
1293 }
1294 /*
1295 * Fall through for interrupt endpoint interval decoding
1296 * since it uses the same rules as low speed interrupt
1297 * endpoints.
1298 */
1299
1300 case USB_SPEED_LOW:
1301 if (usb_endpoint_xfer_int(&ep->desc) ||
1302 usb_endpoint_xfer_isoc(&ep->desc)) {
1303
1304 interval = xhci_parse_frame_interval(udev, ep);
1305 }
1306 break;
1307
1308 default:
1309 BUG();
1310 }
1311 return EP_INTERVAL(interval);
1312 }
1313
1314 /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1315 * High speed endpoint descriptors can define "the number of additional
1316 * transaction opportunities per microframe", but that goes in the Max Burst
1317 * endpoint context field.
1318 */
1319 static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1320 struct usb_host_endpoint *ep)
1321 {
1322 if (udev->speed != USB_SPEED_SUPER ||
1323 !usb_endpoint_xfer_isoc(&ep->desc))
1324 return 0;
1325 return ep->ss_ep_comp.bmAttributes;
1326 }
1327
1328 static u32 xhci_get_endpoint_type(struct usb_device *udev,
1329 struct usb_host_endpoint *ep)
1330 {
1331 int in;
1332 u32 type;
1333
1334 in = usb_endpoint_dir_in(&ep->desc);
1335 if (usb_endpoint_xfer_control(&ep->desc)) {
1336 type = EP_TYPE(CTRL_EP);
1337 } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
1338 if (in)
1339 type = EP_TYPE(BULK_IN_EP);
1340 else
1341 type = EP_TYPE(BULK_OUT_EP);
1342 } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
1343 if (in)
1344 type = EP_TYPE(ISOC_IN_EP);
1345 else
1346 type = EP_TYPE(ISOC_OUT_EP);
1347 } else if (usb_endpoint_xfer_int(&ep->desc)) {
1348 if (in)
1349 type = EP_TYPE(INT_IN_EP);
1350 else
1351 type = EP_TYPE(INT_OUT_EP);
1352 } else {
1353 BUG();
1354 }
1355 return type;
1356 }
1357
1358 /* Return the maximum endpoint service interval time (ESIT) payload.
1359 * Basically, this is the maxpacket size, multiplied by the burst size
1360 * and mult size.
1361 */
1362 static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
1363 struct usb_device *udev,
1364 struct usb_host_endpoint *ep)
1365 {
1366 int max_burst;
1367 int max_packet;
1368
1369 /* Only applies for interrupt or isochronous endpoints */
1370 if (usb_endpoint_xfer_control(&ep->desc) ||
1371 usb_endpoint_xfer_bulk(&ep->desc))
1372 return 0;
1373
1374 if (udev->speed == USB_SPEED_SUPER)
1375 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1376
1377 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1378 max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
1379 /* A 0 in max burst means 1 transfer per ESIT */
1380 return max_packet * (max_burst + 1);
1381 }
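
/*
 * Example for the high-speed math above: a periodic endpoint whose
 * wMaxPacketSize is 0x1400 packs a 1024-byte max packet (bits 10:0)
 * with 2 extra transaction opportunities (bits 12:11), so the max ESIT
 * payload is 1024 * (2 + 1) = 3072 bytes per service interval.
 */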
1382
1383 /* Set up an endpoint with one ring segment. Do not allocate stream rings.
1384 * Drivers will have to call usb_alloc_streams() to do that.
1385 */
1386 int xhci_endpoint_init(struct xhci_hcd *xhci,
1387 struct xhci_virt_device *virt_dev,
1388 struct usb_device *udev,
1389 struct usb_host_endpoint *ep,
1390 gfp_t mem_flags)
1391 {
1392 unsigned int ep_index;
1393 struct xhci_ep_ctx *ep_ctx;
1394 struct xhci_ring *ep_ring;
1395 unsigned int max_packet;
1396 unsigned int max_burst;
1397 enum xhci_ring_type type;
1398 u32 max_esit_payload;
1399
1400 ep_index = xhci_get_endpoint_index(&ep->desc);
1401 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1402
1403 type = usb_endpoint_type(&ep->desc);
1404 /* Set up the endpoint ring */
1405 virt_dev->eps[ep_index].new_ring =
1406 xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
1407 if (!virt_dev->eps[ep_index].new_ring) {
1408 /* Attempt to use the ring cache */
1409 if (virt_dev->num_rings_cached == 0)
1410 return -ENOMEM;
1411 virt_dev->eps[ep_index].new_ring =
1412 virt_dev->ring_cache[virt_dev->num_rings_cached];
1413 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1414 virt_dev->num_rings_cached--;
1415 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1416 1, type);
1417 }
1418 virt_dev->eps[ep_index].skip = false;
1419 ep_ring = virt_dev->eps[ep_index].new_ring;
1420 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
1421
1422 ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
1423 | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
1424
1425 /* FIXME dig Mult and streams info out of ep companion desc */
1426
1427 /* Allow 3 retries for everything but isoc;
1428 * CErr shall be set to 0 for Isoch endpoints.
1429 */
1430 if (!usb_endpoint_xfer_isoc(&ep->desc))
1431 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
1432 else
1433 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));
1434
1435 ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
1436
1437 /* Set the max packet size and max burst */
1438 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1439 max_burst = 0;
1440 switch (udev->speed) {
1441 case USB_SPEED_SUPER:
1442 /* dig out max burst from ep companion desc */
1443 max_burst = ep->ss_ep_comp.bMaxBurst;
1444 break;
1445 case USB_SPEED_HIGH:
1446 /* Some devices get this wrong */
1447 if (usb_endpoint_xfer_bulk(&ep->desc))
1448 max_packet = 512;
1449 /* bits 11:12 specify the number of additional transaction
1450 * opportunities per microframe (USB 2.0, section 9.6.6)
1451 */
1452 if (usb_endpoint_xfer_isoc(&ep->desc) ||
1453 usb_endpoint_xfer_int(&ep->desc)) {
1454 max_burst = (usb_endpoint_maxp(&ep->desc)
1455 & 0x1800) >> 11;
1456 }
1457 break;
1458 case USB_SPEED_FULL:
1459 case USB_SPEED_LOW:
1460 {
1461 CHIP_SW_VER sw_code = mt_get_chip_sw_ver();
1462 unsigned int hw_code = mt_get_chip_hw_code();
1463
1464 if ((hw_code == 0x6595) && (sw_code <= CHIP_SW_VER_01)) {
1465 /* workaround for maxp size issue of RXXE */
1466 if ((max_packet % 4 == 2) && (max_packet % 16 != 14) &&
1467 (max_burst == 0) && usb_endpoint_dir_in(&ep->desc))
1468 max_packet += 2;
1469 }
1470 break;
1471 }
1472 default:
1473 BUG();
1474 }
1475 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
1476 MAX_BURST(max_burst));
1477 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
1478 ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
1479
1480 /*
1481 * XXX no idea how to calculate the average TRB buffer length for bulk
1482 * endpoints, as the driver gives us no clue how big each scatter gather
1483 * list entry (or buffer) is going to be.
1484 *
1485 * For isochronous and interrupt endpoints, we set it to the max
1486 * available, until we have new API in the USB core to allow drivers to
1487 * declare how much bandwidth they actually need.
1488 *
1489 * Normally, it would be calculated by taking the total of the buffer
1490 * lengths in the TD and then dividing by the number of TRBs in a TD,
1491 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
1492 * use Event Data TRBs, and we don't chain in a link TRB on short
1493 * transfers, we're basically dividing by 1.
1494 *
1495 * xHCI 1.0 specification indicates that the Average TRB Length should
1496 * be set to 8 for control endpoints.
1497 */
1498 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
1499 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
1500 else
1501 ep_ctx->tx_info |=
1502 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
1503
1504 /* FIXME Debug endpoint context */
1505 return 0;
1506 }
1507
1508 void xhci_endpoint_zero(struct xhci_hcd *xhci,
1509 struct xhci_virt_device *virt_dev,
1510 struct usb_host_endpoint *ep)
1511 {
1512 unsigned int ep_index;
1513 struct xhci_ep_ctx *ep_ctx;
1514
1515 ep_index = xhci_get_endpoint_index(&ep->desc);
1516 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1517
1518 ep_ctx->ep_info = 0;
1519 ep_ctx->ep_info2 = 0;
1520 ep_ctx->deq = 0;
1521 ep_ctx->tx_info = 0;
1522 /* Don't free the endpoint ring until the set interface or configuration
1523 * request succeeds.
1524 */
1525 }
1526
1527 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1528 {
1529 bw_info->ep_interval = 0;
1530 bw_info->mult = 0;
1531 bw_info->num_packets = 0;
1532 bw_info->max_packet_size = 0;
1533 bw_info->type = 0;
1534 bw_info->max_esit_payload = 0;
1535 }
1536
1537 void xhci_update_bw_info(struct xhci_hcd *xhci,
1538 struct xhci_container_ctx *in_ctx,
1539 struct xhci_input_control_ctx *ctrl_ctx,
1540 struct xhci_virt_device *virt_dev)
1541 {
1542 struct xhci_bw_info *bw_info;
1543 struct xhci_ep_ctx *ep_ctx;
1544 unsigned int ep_type;
1545 int i;
1546
1547 for (i = 1; i < 31; ++i) {
1548 bw_info = &virt_dev->eps[i].bw_info;
1549
1550 /* We can't tell what endpoint type is being dropped, but
1551 * unconditionally clearing the bandwidth info for non-periodic
1552 * endpoints should be harmless because the info will never be
1553 * set in the first place.
1554 */
1555 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1556 /* Dropped endpoint */
1557 xhci_clear_endpoint_bw_info(bw_info);
1558 continue;
1559 }
1560
1561 if (EP_IS_ADDED(ctrl_ctx, i)) {
1562 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1563 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1564
1565 /* Ignore non-periodic endpoints */
1566 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1567 ep_type != ISOC_IN_EP &&
1568 ep_type != INT_IN_EP)
1569 continue;
1570
1571 /* Added or changed endpoint */
1572 bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1573 le32_to_cpu(ep_ctx->ep_info));
1574 /* Number of packets and mult are zero-based in the
1575 * input context, but we want one-based for the
1576 * interval table.
1577 */
1578 bw_info->mult = CTX_TO_EP_MULT(
1579 le32_to_cpu(ep_ctx->ep_info)) + 1;
1580 bw_info->num_packets = CTX_TO_MAX_BURST(
1581 le32_to_cpu(ep_ctx->ep_info2)) + 1;
1582 bw_info->max_packet_size = MAX_PACKET_DECODED(
1583 le32_to_cpu(ep_ctx->ep_info2));
1584 bw_info->type = ep_type;
1585 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1586 le32_to_cpu(ep_ctx->tx_info));
1587 }
1588 }
1589 }
1590
1591 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1592 * Useful when you want to change one particular aspect of the endpoint and then
1593 * issue a configure endpoint command.
1594 */
1595 void xhci_endpoint_copy(struct xhci_hcd *xhci,
1596 struct xhci_container_ctx *in_ctx,
1597 struct xhci_container_ctx *out_ctx,
1598 unsigned int ep_index)
1599 {
1600 struct xhci_ep_ctx *out_ep_ctx;
1601 struct xhci_ep_ctx *in_ep_ctx;
1602
1603 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1604 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1605
1606 in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1607 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1608 in_ep_ctx->deq = out_ep_ctx->deq;
1609 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1610 }
1611
1612 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1613 * Useful when you want to change one particular aspect of the endpoint and then
1614 * issue a configure endpoint command. Only the context entries field matters,
1615 * but we'll copy the whole thing anyway.
1616 */
1617 void xhci_slot_copy(struct xhci_hcd *xhci,
1618 struct xhci_container_ctx *in_ctx,
1619 struct xhci_container_ctx *out_ctx)
1620 {
1621 struct xhci_slot_ctx *in_slot_ctx;
1622 struct xhci_slot_ctx *out_slot_ctx;
1623
1624 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1625 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1626
1627 in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1628 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1629 in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1630 in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1631 }
1632
1633 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1634 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1635 {
1636 int i;
1637 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1638 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1639
1640 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
1641
1642 if (!num_sp)
1643 return 0;
1644
1645 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
1646 if (!xhci->scratchpad)
1647 goto fail_sp;
1648
1649 xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1650 num_sp * sizeof(u64),
1651 &xhci->scratchpad->sp_dma, flags);
1652 if (!xhci->scratchpad->sp_array)
1653 goto fail_sp2;
1654
1655 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
1656 if (!xhci->scratchpad->sp_buffers)
1657 goto fail_sp3;
1658
1659 xhci->scratchpad->sp_dma_buffers =
1660 kzalloc(sizeof(dma_addr_t) * num_sp, flags);
1661
1662 if (!xhci->scratchpad->sp_dma_buffers)
1663 goto fail_sp4;
1664
1665 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1666 for (i = 0; i < num_sp; i++) {
1667 dma_addr_t dma;
1668 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1669 flags);
1670 if (!buf)
1671 goto fail_sp5;
1672
1673 xhci->scratchpad->sp_array[i] = dma;
1674 xhci->scratchpad->sp_buffers[i] = buf;
1675 xhci->scratchpad->sp_dma_buffers[i] = dma;
1676 }
1677
1678 return 0;
1679
1680 fail_sp5:
1681 for (i = i - 1; i >= 0; i--) {
1682 dma_free_coherent(dev, xhci->page_size,
1683 xhci->scratchpad->sp_buffers[i],
1684 xhci->scratchpad->sp_dma_buffers[i]);
1685 }
1686 kfree(xhci->scratchpad->sp_dma_buffers);
1687
1688 fail_sp4:
1689 kfree(xhci->scratchpad->sp_buffers);
1690
1691 fail_sp3:
1692 dma_free_coherent(dev, num_sp * sizeof(u64),
1693 xhci->scratchpad->sp_array,
1694 xhci->scratchpad->sp_dma);
1695
1696 fail_sp2:
1697 kfree(xhci->scratchpad);
1698 xhci->scratchpad = NULL;
1699
1700 fail_sp:
1701 return -ENOMEM;
1702 }
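
/*
 * Shape of what scratchpad_alloc() just built, e.g. for num_sp = 4:
 * dcbaa->dev_context_ptrs[0] holds the DMA address of sp_array, a
 * 4-entry table of u64s, and each sp_array[i] points at its own
 * xhci->page_size buffer for the controller's private use.  DCBAA
 * slot 0 is free for this because slot ID 0 is never assigned to a
 * device.
 */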
1703
1704 static void scratchpad_free(struct xhci_hcd *xhci)
1705 {
1706 int num_sp;
1707 int i;
1708 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1709
1710 if (!xhci->scratchpad)
1711 return;
1712
1713 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1714
1715 for (i = 0; i < num_sp; i++) {
1716 dma_free_coherent(dev, xhci->page_size,
1717 xhci->scratchpad->sp_buffers[i],
1718 xhci->scratchpad->sp_dma_buffers[i]);
1719 }
1720 kfree(xhci->scratchpad->sp_dma_buffers);
1721 kfree(xhci->scratchpad->sp_buffers);
1722 dma_free_coherent(dev, num_sp * sizeof(u64),
1723 xhci->scratchpad->sp_array,
1724 xhci->scratchpad->sp_dma);
1725 kfree(xhci->scratchpad);
1726 xhci->scratchpad = NULL;
1727 }
1728
1729 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1730 bool allocate_in_ctx, bool allocate_completion,
1731 gfp_t mem_flags)
1732 {
1733 struct xhci_command *command;
1734
1735 command = kzalloc(sizeof(*command), mem_flags);
1736 if (!command)
1737 return NULL;
1738
1739 if (allocate_in_ctx) {
1740 command->in_ctx =
1741 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1742 mem_flags);
1743 if (!command->in_ctx) {
1744 kfree(command);
1745 return NULL;
1746 }
1747 }
1748
1749 if (allocate_completion) {
1750 command->completion =
1751 kzalloc(sizeof(struct completion), mem_flags);
1752 if (!command->completion) {
1753 xhci_free_container_ctx(xhci, command->in_ctx);
1754 kfree(command);
1755 return NULL;
1756 }
1757 init_completion(command->completion);
1758 }
1759
1760 command->status = 0;
1761 INIT_LIST_HEAD(&command->cmd_list);
1762 return command;
1763 }
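
/*
 * Typical use of xhci_alloc_command(); a minimal sketch only, with error
 * handling and the actual TRB queueing elided:
 *
 *	struct xhci_command *cmd;
 *
 *	cmd = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
 *	... queue the command TRB and ring the host controller doorbell ...
 *	wait_for_completion(cmd->completion);
 *	xhci_free_command(xhci, cmd);
 */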
1764
1765 void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
1766 {
1767 if (urb_priv) {
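		/*
		 * All TDs for this URB are carved from one block allocated
		 * in xhci_urb_enqueue(); td[0] points at its start, so a
		 * single kfree() releases every TD.
		 */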
1768 kfree(urb_priv->td[0]);
1769 kfree(urb_priv);
1770 }
1771 }
1772
1773 void xhci_free_command(struct xhci_hcd *xhci,
1774 struct xhci_command *command)
1775 {
1776 xhci_free_container_ctx(xhci,
1777 command->in_ctx);
1778 kfree(command->completion);
1779 kfree(command);
1780 }
1781
1782 void xhci_mem_cleanup(struct xhci_hcd *xhci)
1783 {
1784 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1785 struct dev_info *dev_info, *next;
1786 struct xhci_cd *cur_cd, *next_cd;
1787 unsigned long flags;
1788 int size;
1789 int i, j, num_ports;
1790
1791 /* Free the Event Ring Segment Table and the actual Event Ring */
1792 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
1793 if (xhci->erst.entries)
1794 dma_free_coherent(dev, size,
1795 xhci->erst.entries, xhci->erst.erst_dma_addr);
1796 xhci->erst.entries = NULL;
1797 xhci_dbg(xhci, "Freed ERST\n");
1798 if (xhci->event_ring)
1799 xhci_ring_free(xhci, xhci->event_ring);
1800 xhci->event_ring = NULL;
1801 xhci_dbg(xhci, "Freed event ring\n");
1802
	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->lpm_command = NULL;
	xhci->cmd_ring_reserved_trbs = 0;
1806 if (xhci->cmd_ring)
1807 xhci_ring_free(xhci, xhci->cmd_ring);
1808 xhci->cmd_ring = NULL;
1809 xhci_dbg(xhci, "Freed command ring\n");
1810 list_for_each_entry_safe(cur_cd, next_cd,
1811 &xhci->cancel_cmd_list, cancel_cmd_list) {
1812 list_del(&cur_cd->cancel_cmd_list);
1813 kfree(cur_cd);
1814 }
1815
1816 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1817 for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1818 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1819 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1820 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1821 while (!list_empty(ep))
1822 list_del_init(ep->next);
1823 }
1824 }
1825
1826 for (i = 1; i < MAX_HC_SLOTS; ++i)
1827 xhci_free_virt_device(xhci, i);
1828
1829 if (xhci->segment_pool)
1830 dma_pool_destroy(xhci->segment_pool);
1831 xhci->segment_pool = NULL;
1832 xhci_dbg(xhci, "Freed segment pool\n");
1833
1834 if (xhci->device_pool)
1835 dma_pool_destroy(xhci->device_pool);
1836 xhci->device_pool = NULL;
1837 xhci_dbg(xhci, "Freed device context pool\n");
1838
1839 if (xhci->small_streams_pool)
1840 dma_pool_destroy(xhci->small_streams_pool);
1841 xhci->small_streams_pool = NULL;
1842 xhci_dbg(xhci, "Freed small stream array pool\n");
1843
1844 if (xhci->medium_streams_pool)
1845 dma_pool_destroy(xhci->medium_streams_pool);
1846 xhci->medium_streams_pool = NULL;
1847 xhci_dbg(xhci, "Freed medium stream array pool\n");
1848
1849 if (xhci->dcbaa)
1850 dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1851 xhci->dcbaa, xhci->dcbaa->dma);
1852 xhci->dcbaa = NULL;
1853
1854 scratchpad_free(xhci);
1855
1856 spin_lock_irqsave(&xhci->lock, flags);
1857 list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
1858 list_del(&dev_info->list);
1859 kfree(dev_info);
1860 }
1861 spin_unlock_irqrestore(&xhci->lock, flags);
1862
1863 if (!xhci->rh_bw)
1864 goto no_bw;
1865
1866 for (i = 0; i < num_ports; i++) {
1867 struct xhci_tt_bw_info *tt, *n;
1868 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1869 list_del(&tt->tt_list);
1870 kfree(tt);
1871 }
1872 }
1873
1874 no_bw:
1875 xhci->num_usb2_ports = 0;
1876 xhci->num_usb3_ports = 0;
1877 xhci->num_active_eps = 0;
1878 kfree(xhci->usb2_ports);
1879 kfree(xhci->usb3_ports);
1880 kfree(xhci->port_array);
1881 kfree(xhci->rh_bw);
1882
1883 xhci->page_size = 0;
1884 xhci->page_shift = 0;
1885 xhci->bus_state[0].bus_suspended = 0;
1886 xhci->bus_state[1].bus_suspended = 0;
1887 }
1888
1889 static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1890 struct xhci_segment *input_seg,
1891 union xhci_trb *start_trb,
1892 union xhci_trb *end_trb,
1893 dma_addr_t input_dma,
1894 struct xhci_segment *result_seg,
1895 char *test_name, int test_number)
1896 {
1897 unsigned long long start_dma;
1898 unsigned long long end_dma;
1899 struct xhci_segment *seg;
1900
1901 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1902 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1903
1904 seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
1905 if (seg != result_seg) {
1906 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1907 test_name, test_number);
1908 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1909 "input DMA 0x%llx\n",
1910 input_seg,
1911 (unsigned long long) input_dma);
1912 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1913 "ending TRB %p (0x%llx DMA)\n",
1914 start_trb, start_dma,
1915 end_trb, end_dma);
1916 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1917 result_seg, seg);
1918 return -1;
1919 }
1920 return 0;
1921 }
1922
1923 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
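/*
 * trb_in_td() answers: does the TRB at input_dma fall within the TD running
 * from start_trb to end_trb (possibly wrapping across segments)?  It returns
 * the segment holding the TRB on a hit and NULL on a miss, which is exactly
 * what the vectors below exercise.
 */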
1924 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
1925 {
1926 struct {
1927 dma_addr_t input_dma;
1928 struct xhci_segment *result_seg;
1929 } simple_test_vector [] = {
1930 /* A zeroed DMA field should fail */
1931 { 0, NULL },
1932 /* One TRB before the ring start should fail */
1933 { xhci->event_ring->first_seg->dma - 16, NULL },
1934 /* One byte before the ring start should fail */
1935 { xhci->event_ring->first_seg->dma - 1, NULL },
1936 /* Starting TRB should succeed */
1937 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1938 /* Ending TRB should succeed */
1939 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1940 xhci->event_ring->first_seg },
1941 /* One byte after the ring end should fail */
1942 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1943 /* One TRB after the ring end should fail */
1944 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1945 /* An address of all ones should fail */
1946 { (dma_addr_t) (~0), NULL },
1947 };
1948 struct {
1949 struct xhci_segment *input_seg;
1950 union xhci_trb *start_trb;
1951 union xhci_trb *end_trb;
1952 dma_addr_t input_dma;
1953 struct xhci_segment *result_seg;
1954 } complex_test_vector [] = {
1955 /* Test feeding a valid DMA address from a different ring */
1956 { .input_seg = xhci->event_ring->first_seg,
1957 .start_trb = xhci->event_ring->first_seg->trbs,
1958 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1959 .input_dma = xhci->cmd_ring->first_seg->dma,
1960 .result_seg = NULL,
1961 },
1962 /* Test feeding a valid end TRB from a different ring */
1963 { .input_seg = xhci->event_ring->first_seg,
1964 .start_trb = xhci->event_ring->first_seg->trbs,
1965 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1966 .input_dma = xhci->cmd_ring->first_seg->dma,
1967 .result_seg = NULL,
1968 },
1969 /* Test feeding a valid start and end TRB from a different ring */
1970 { .input_seg = xhci->event_ring->first_seg,
1971 .start_trb = xhci->cmd_ring->first_seg->trbs,
1972 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1973 .input_dma = xhci->cmd_ring->first_seg->dma,
1974 .result_seg = NULL,
1975 },
1976 /* TRB in this ring, but after this TD */
1977 { .input_seg = xhci->event_ring->first_seg,
1978 .start_trb = &xhci->event_ring->first_seg->trbs[0],
1979 .end_trb = &xhci->event_ring->first_seg->trbs[3],
1980 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
1981 .result_seg = NULL,
1982 },
1983 /* TRB in this ring, but before this TD */
1984 { .input_seg = xhci->event_ring->first_seg,
1985 .start_trb = &xhci->event_ring->first_seg->trbs[3],
1986 .end_trb = &xhci->event_ring->first_seg->trbs[6],
1987 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1988 .result_seg = NULL,
1989 },
1990 /* TRB in this ring, but after this wrapped TD */
1991 { .input_seg = xhci->event_ring->first_seg,
1992 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1993 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1994 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1995 .result_seg = NULL,
1996 },
1997 /* TRB in this ring, but before this wrapped TD */
1998 { .input_seg = xhci->event_ring->first_seg,
1999 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2000 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2001 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
2002 .result_seg = NULL,
2003 },
2004 /* TRB not in this ring, and we have a wrapped TD */
2005 { .input_seg = xhci->event_ring->first_seg,
2006 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2007 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2008 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
2009 .result_seg = NULL,
2010 },
2011 };
2012
2013 unsigned int num_tests;
2014 int i, ret;
2015
2016 num_tests = ARRAY_SIZE(simple_test_vector);
2017 for (i = 0; i < num_tests; i++) {
2018 ret = xhci_test_trb_in_td(xhci,
2019 xhci->event_ring->first_seg,
2020 xhci->event_ring->first_seg->trbs,
2021 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2022 simple_test_vector[i].input_dma,
2023 simple_test_vector[i].result_seg,
2024 "Simple", i);
2025 if (ret < 0)
2026 return ret;
2027 }
2028
2029 num_tests = ARRAY_SIZE(complex_test_vector);
2030 for (i = 0; i < num_tests; i++) {
2031 ret = xhci_test_trb_in_td(xhci,
2032 complex_test_vector[i].input_seg,
2033 complex_test_vector[i].start_trb,
2034 complex_test_vector[i].end_trb,
2035 complex_test_vector[i].input_dma,
2036 complex_test_vector[i].result_seg,
2037 "Complex", i);
2038 if (ret < 0)
2039 return ret;
2040 }
2041 xhci_dbg(xhci, "TRB math tests passed.\n");
2042 return 0;
2043 }
2044
2045 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2046 {
2047 u64 temp;
2048 dma_addr_t deq;
2049
2050 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2051 xhci->event_ring->dequeue);
2052 if (deq == 0 && !in_interrupt())
2053 xhci_warn(xhci, "WARN something wrong with SW event ring "
2054 "dequeue ptr.\n");
2055 /* Update HC event ring dequeue pointer */
2056 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2057 temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because there might be
	 * more events to service: masking it to 0 means the write below
	 * leaves the bit untouched, since RW1C bits only change when
	 * written with 1.
	 */
	temp &= ~ERST_EHB;
2062 xhci_dbg(xhci, "// Write event ring dequeue pointer, "
2063 "preserving EHB bit\n");
2064 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2065 &xhci->ir_set->erst_dequeue);
2066 }
2067
2068 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2069 __le32 __iomem *addr, u8 major_revision)
2070 {
2071 u32 temp, port_offset, port_count;
2072 int i;
2073
2074 if (major_revision > 0x03) {
2075 xhci_warn(xhci, "Ignoring unknown port speed, "
2076 "Ext Cap %p, revision = 0x%x\n",
2077 addr, major_revision);
2078 /* Ignoring port protocol we can't understand. FIXME */
2079 return;
2080 }
2081
2082 /* Port offset and count in the third dword, see section 7.2 */
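	/*
	 * Layout of that dword per xHCI spec section 7.2:
	 *   bits  7:0  Compatible Port Offset (first root port, 1-based)
	 *   bits 15:8  Compatible Port Count
	 */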
2083 temp = xhci_readl(xhci, addr + 2);
2084 port_offset = XHCI_EXT_PORT_OFF(temp);
2085 port_count = XHCI_EXT_PORT_COUNT(temp);
2086 xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
2087 "count = %u, revision = 0x%x\n",
2088 addr, port_offset, port_count, major_revision);
2089 /* Port count includes the current port offset */
2090 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are '1' to MaxPorts" */
2092 return;
2093
2094 /* Check the host's USB2 LPM capability */
2095 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
2096 (temp & XHCI_L1C)) {
2097 xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
2098 xhci->sw_lpm_support = 1;
2099 }
2100
2101 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
2102 xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
2103 xhci->sw_lpm_support = 1;
2104 if (temp & XHCI_HLC) {
2105 xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
2106 xhci->hw_lpm_support = 1;
2107 }
2108 }
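	/*
	 * Loosely: sw_lpm_support means the driver may initiate USB 2.0 L1
	 * itself, while hw_lpm_support (from the HLC bit) means the
	 * controller can manage L1 entry on its own.
	 */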
2109
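	/* The capability's port offset is 1-based; make it a 0-based index. */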
2110 port_offset--;
2111 for (i = port_offset; i < (port_offset + port_count); i++) {
2112 /* Duplicate entry. Ignore the port if the revisions differ. */
2113 if (xhci->port_array[i] != 0) {
2114 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2115 " port %u\n", addr, i);
2116 xhci_warn(xhci, "Port was marked as USB %u, "
2117 "duplicated as USB %u\n",
2118 xhci->port_array[i], major_revision);
2119 /* Only adjust the roothub port counts if we haven't
2120 * found a similar duplicate.
2121 */
2122 if (xhci->port_array[i] != major_revision &&
2123 xhci->port_array[i] != DUPLICATE_ENTRY) {
2124 if (xhci->port_array[i] == 0x03)
2125 xhci->num_usb3_ports--;
2126 else
2127 xhci->num_usb2_ports--;
2128 xhci->port_array[i] = DUPLICATE_ENTRY;
2129 }
2130 /* FIXME: Should we disable the port? */
2131 continue;
2132 }
2133 xhci->port_array[i] = major_revision;
2134 if (major_revision == 0x03)
2135 xhci->num_usb3_ports++;
2136 else
2137 xhci->num_usb2_ports++;
2138 }
2139 /* FIXME: Should we disable ports not in the Extended Capabilities? */
2140 }
2141
2142 /*
2143 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2144 * specify what speeds each port is supposed to be. We can't count on the port
2145 * speed bits in the PORTSC register being correct until a device is connected,
2146 * but we need to set up the two fake roothubs with the correct number of USB
2147 * 3.0 and USB 2.0 ports at host controller initialization time.
2148 */
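/*
 * For example, a host with four ports whose capabilities declare ports 1-2
 * as USB 2.0 and ports 3-4 as USB 3.0 ends up with port_array holding
 * { 0x02, 0x02, 0x03, 0x03 }, num_usb2_ports = num_usb3_ports = 2, and
 * usb2_ports/usb3_ports pointing at the corresponding PORTSC registers.
 */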
2149 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2150 {
2151 __le32 __iomem *addr;
2152 u32 offset;
2153 unsigned int num_ports;
2154 int i, j, port_index;
2155
2156 addr = &xhci->cap_regs->hcc_params;
2157 offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
2158 if (offset == 0) {
2159 xhci_err(xhci, "No Extended Capability registers, "
2160 "unable to set up roothub.\n");
2161 return -ENODEV;
2162 }
2163
2164 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2165 xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
2166 if (!xhci->port_array)
2167 return -ENOMEM;
2168
2169 xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
2170 if (!xhci->rh_bw)
2171 return -ENOMEM;
2172 for (i = 0; i < num_ports; i++) {
2173 struct xhci_interval_bw_table *bw_table;
2174
2175 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2176 bw_table = &xhci->rh_bw[i].bw_table;
2177 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2178 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2179 }
2180
2181 /*
2182 * For whatever reason, the first capability offset is from the
2183 * capability register base, not from the HCCPARAMS register.
	 * See section 5.3.6 for offset calculation.  Note that the offset
	 * is in units of 32-bit words, which the __le32 pointer arithmetic
	 * below accounts for.
	 */
2186 addr = &xhci->cap_regs->hc_capbase + offset;
2187 while (1) {
2188 u32 cap_id;
2189
2190 cap_id = xhci_readl(xhci, addr);
2191 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
2192 xhci_add_in_port(xhci, num_ports, addr,
2193 (u8) XHCI_EXT_PORT_MAJOR(cap_id));
2194 offset = XHCI_EXT_CAPS_NEXT(cap_id);
2195 if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
2196 == num_ports)
2197 break;
2198 /*
2199 * Once you're into the Extended Capabilities, the offset is
2200 * always relative to the register holding the offset.
2201 */
2202 addr += offset;
2203 }
2204
2205 if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
2206 xhci_warn(xhci, "No ports on the roothubs?\n");
2207 return -ENODEV;
2208 }
2209 xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
2210 xhci->num_usb2_ports, xhci->num_usb3_ports);
2211
2212 /* Place limits on the number of roothub ports so that the hub
2213 * descriptors aren't longer than the USB core will allocate.
2214 */
2215 if (xhci->num_usb3_ports > 15) {
2216 xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
2217 xhci->num_usb3_ports = 15;
2218 }
2219 if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2220 xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
2221 USB_MAXCHILDREN);
2222 xhci->num_usb2_ports = USB_MAXCHILDREN;
2223 }
2224
2225 /*
2226 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
2227 * Not sure how the USB core will handle a hub with no ports...
2228 */
2229 if (xhci->num_usb2_ports) {
2230 xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
2231 xhci->num_usb2_ports, flags);
2232 if (!xhci->usb2_ports)
2233 return -ENOMEM;
2234
2235 port_index = 0;
2236 for (i = 0; i < num_ports; i++) {
2237 if (xhci->port_array[i] == 0x03 ||
2238 xhci->port_array[i] == 0 ||
2239 xhci->port_array[i] == DUPLICATE_ENTRY)
2240 continue;
2241
2242 xhci->usb2_ports[port_index] =
2243 &xhci->op_regs->port_status_base +
2244 NUM_PORT_REGS*i;
2245 xhci_dbg(xhci, "USB 2.0 port at index %u, "
2246 "addr = %p\n", i,
2247 xhci->usb2_ports[port_index]);
2248 port_index++;
2249 if (port_index == xhci->num_usb2_ports)
2250 break;
2251 }
2252 }
2253 if (xhci->num_usb3_ports) {
2254 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
2255 xhci->num_usb3_ports, flags);
2256 if (!xhci->usb3_ports)
2257 return -ENOMEM;
2258
2259 port_index = 0;
2260 for (i = 0; i < num_ports; i++)
2261 if (xhci->port_array[i] == 0x03) {
2262 xhci->usb3_ports[port_index] =
2263 &xhci->op_regs->port_status_base +
2264 NUM_PORT_REGS*i;
2265 xhci_dbg(xhci, "USB 3.0 port at index %u, "
2266 "addr = %p\n", i,
2267 xhci->usb3_ports[port_index]);
2268 port_index++;
2269 if (port_index == xhci->num_usb3_ports)
2270 break;
2271 }
2272 }
2273 return 0;
2274 }
2275
2276 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2277 {
2278 dma_addr_t dma;
2279 struct device *dev = xhci_to_hcd(xhci)->self.controller;
2280 unsigned int val, val2;
2281 u64 val_64;
2282 struct xhci_segment *seg;
2283 u32 page_size, temp;
2284 int i;
2285
2286 INIT_LIST_HEAD(&xhci->lpm_failed_devs);
2287 INIT_LIST_HEAD(&xhci->cancel_cmd_list);
2288
2289 page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
2290 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
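	/*
	 * Bit n of the PAGESIZE register set means the controller supports
	 * pages of 2^(n + 12) bytes; bit 0 alone is the common 4K case.
	 */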
2291 for (i = 0; i < 16; i++) {
2292 if ((0x1 & page_size) != 0)
2293 break;
2294 page_size = page_size >> 1;
2295 }
2296 if (i < 16)
2297 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
2298 else
2299 xhci_warn(xhci, "WARN: no supported page size\n");
2300 /* Use 4K pages, since that's common and the minimum the HC supports */
2301 xhci->page_shift = 12;
2302 xhci->page_size = 1 << xhci->page_shift;
2303 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
2304
2305 /*
2306 * Program the Number of Device Slots Enabled field in the CONFIG
2307 * register with the max value of slots the HC can handle.
2308 */
2309 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
2310 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
2311 (unsigned int) val);
2312 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
2313 val |= (val2 & ~HCS_SLOTS_MASK);
2314 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
2315 (unsigned int) val);
2316 xhci_writel(xhci, val, &xhci->op_regs->config_reg);
2317
	/*
	 * Section 6.1 - the Device Context Base Address Array must be
	 * physically contiguous and 64-byte (cache line) aligned.
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			flags);
2324 if (!xhci->dcbaa)
2325 goto fail;
2326 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
2327 xhci->dcbaa->dma = dma;
2328 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
2329 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2330 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2331
2332 /*
2333 * Initialize the ring segment pool. The ring must be a contiguous
2334 * structure comprised of TRBs. The TRBs must be 16 byte aligned,
2335 * however, the command ring segment needs 64-byte aligned segments,
2336 * so we pick the greater alignment need.
2337 */
2338 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2339 TRB_SEGMENT_SIZE, 64, xhci->page_size);
2340
2341 /* See Table 46 and Note on Figure 55 */
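	/* 2112 bytes is the worst case: an input context using 64-byte
	 * context entries, i.e. 33 entries * 64 bytes.
	 */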
2342 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2343 2112, 64, xhci->page_size);
2344 if (!xhci->segment_pool || !xhci->device_pool)
2345 goto fail;
2346
2347 /* Linear stream context arrays don't have any boundary restrictions,
2348 * and only need to be 16-byte aligned.
2349 */
2350 xhci->small_streams_pool =
2351 dma_pool_create("xHCI 256 byte stream ctx arrays",
2352 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2353 xhci->medium_streams_pool =
2354 dma_pool_create("xHCI 1KB stream ctx arrays",
2355 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2356 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
2357 * will be allocated with dma_alloc_coherent()
2358 */
2359
2360 if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2361 goto fail;
2362
	/* Set up the command ring to have one segment for now. */
2364 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
2365 if (!xhci->cmd_ring)
2366 goto fail;
2367 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
2368 xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
2369 (unsigned long long)xhci->cmd_ring->first_seg->dma);
2370
2371 /* Set the address in the Command Ring Control register */
2372 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2373 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2374 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2375 xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
2377 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2378 xhci_dbg_cmd_ptrs(xhci);
2379
2380 xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
2381 if (!xhci->lpm_command)
2382 goto fail;
2383
2384 /* Reserve one command ring TRB for disabling LPM.
2385 * Since the USB core grabs the shared usb_bus bandwidth mutex before
2386 * disabling LPM, we only need to reserve one TRB for all devices.
2387 */
2388 xhci->cmd_ring_reserved_trbs++;
2389
2390 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
2391 val &= DBOFF_MASK;
2392 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
2393 " from cap regs base addr\n", val);
2394 xhci->dba = (void __iomem *) xhci->cap_regs + val;
2395 xhci_dbg_regs(xhci);
2396 xhci_print_run_regs(xhci);
2397 /* Set ir_set to interrupt register set 0 */
2398 xhci->ir_set = &xhci->run_regs->ir_set[0];
2399
2400 /*
2401 * Event ring setup: Allocate a normal ring, but also setup
2402 * the event ring segment table (ERST). Section 4.9.3.
2403 */
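	/*
	 * Each ERST entry describes one event ring segment (base address
	 * plus size in TRBs); the controller finds segments through this
	 * table, since event rings don't use link TRBs.
	 */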
2404 xhci_dbg(xhci, "// Allocating event ring\n");
2405 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
2406 flags);
2407 if (!xhci->event_ring)
2408 goto fail;
2409 if (xhci_check_trb_in_td_math(xhci, flags) < 0)
2410 goto fail;
2411
	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			flags);
2415 if (!xhci->erst.entries)
2416 goto fail;
2417 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
2418 (unsigned long long)dma);
2419
2420 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
2421 xhci->erst.num_entries = ERST_NUM_SEGS;
2422 xhci->erst.erst_dma_addr = dma;
2423 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
2424 xhci->erst.num_entries,
2425 xhci->erst.entries,
2426 (unsigned long long)xhci->erst.erst_dma_addr);
2427
2428 /* set ring base address and size for each segment table entry */
2429 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
2430 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
2431 entry->seg_addr = cpu_to_le64(seg->dma);
2432 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
2433 entry->rsvd = 0;
2434 seg = seg->next;
2435 }
2436
2437 /* set ERST count with the number of entries in the segment table */
2438 val = xhci_readl(xhci, &xhci->ir_set->erst_size);
2439 val &= ERST_SIZE_MASK;
2440 val |= ERST_NUM_SEGS;
2441 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
2442 val);
2443 xhci_writel(xhci, val, &xhci->ir_set->erst_size);
2444
2445 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
2446 /* set the segment table base address */
2447 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
2448 (unsigned long long)xhci->erst.erst_dma_addr);
2449 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2450 val_64 &= ERST_PTR_MASK;
2451 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2452 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
2453
2454 /* Set the event ring dequeue address */
2455 xhci_set_hc_event_deq(xhci);
2456 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
2457 xhci_print_ir_set(xhci, 0);
2458
2459 /*
2460 * XXX: Might need to set the Interrupter Moderation Register to
2461 * something other than the default (~1ms minimum between interrupts).
2462 * See section 5.5.1.2.
2463 */
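	/*
	 * (For reference, xhci_run() programs it via ir_set->irq_control,
	 * masking with ER_IRQ_INTERVAL_MASK and writing 160, i.e.
	 * 160 * 250ns = 40us between interrupts.)
	 */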
2464 init_completion(&xhci->addr_dev);
2465 for (i = 0; i < MAX_HC_SLOTS; ++i)
2466 xhci->devs[i] = NULL;
2467 for (i = 0; i < USB_MAXCHILDREN; ++i) {
2468 xhci->bus_state[0].resume_done[i] = 0;
2469 xhci->bus_state[1].resume_done[i] = 0;
2470 }
2471
2472 if (scratchpad_alloc(xhci, flags))
2473 goto fail;
2474 if (xhci_setup_port_arrays(xhci, flags))
2475 goto fail;
2476
2477 /* Enable USB 3.0 device notifications for function remote wake, which
2478 * is necessary for allowing USB 3.0 devices to do remote wakeup from
2479 * U3 (device suspend).
2480 */
2481 temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
2482 temp &= ~DEV_NOTE_MASK;
2483 temp |= DEV_NOTE_FWAKE;
2484 xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);
2485
2486 return 0;
2487
2488 fail:
2489 xhci_warn(xhci, "Couldn't initialize memory\n");
2490 xhci_halt(xhci);
2491 xhci_reset(xhci);
2492 xhci_mem_cleanup(xhci);
2493 return -ENOMEM;
2494 }