/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-mtk-scheduler.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
struct xhci_segment *mtktest_xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
void mtktest_xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
void mtktest_xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		if (xhci_link_trb_quirk(xhci))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}
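
/*
 * Illustrative sketch only (not part of the driver, not compiled): this is
 * how xhci_ring_alloc() below combines this helper with the Link TRB flags
 * the comment above mentions - close the loop back to the first segment,
 * then mark the wrap point with the Toggle Cycle bit.
 */
#if 0
	mtktest_xhci_link_segments(xhci, last_seg, ring->first_seg, true);
	last_seg->trbs[TRBS_PER_SEGMENT-1].link.control |= LINK_TOGGLE;
#endif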
/* XXX: Do we need the hcd structure in all these functions? */
void mtktest_xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		mtktest_xhci_segment_free(xhci, seg);
		seg = next;
	}
	mtktest_xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;
}
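
/*
 * Example of the cycle-bit handshake described above (illustrative only,
 * not compiled): the ring starts zeroed with PCS == CCS == 1, so every
 * TRB's cycle bit (0) differs from the consumer's CCS and the ring reads
 * as empty.  A producer hands a TRB over by flipping its cycle bit to
 * match PCS; a consumer only processes TRBs whose cycle bit matches CCS.
 */
#if 0
	/* producer side, after filling in the rest of the TRB: */
	trb->generic.field[3] |= ring->cycle_state;	/* hand over to consumer */
	/* consumer side: */
	if ((trb->generic.field[3] & TRB_CYCLE) == ring->cycle_state)
		/* TRB is owned by us and may be processed */;
#endif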
/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring	*ring;
	struct xhci_segment	*prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = mtktest_xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = mtktest_xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		mtktest_xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	mtktest_xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	xhci_initialize_ring_info(ring);
	// spin_lock_init(&ring->lock);
	return ring;

fail:
	mtktest_xhci_ring_free(xhci, ring);
	return NULL;
}
void mtktest_xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		rings_cached = virt_dev->num_rings_cached;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				rings_cached,
				(rings_cached > 1) ? "s" : "");
	} else {
		mtktest_xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		/* All endpoint rings have link TRBs */
		mtktest_xhci_link_segments(xhci, seg, seg->next, 1);
		seg = seg->next;
	} while (seg != ring->first_seg);
	xhci_initialize_ring_info(ring);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);

	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
static void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}
struct xhci_input_control_ctx *mtktest_xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *mtktest_xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *mtktest_xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
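
/*
 * Worked example for the getters above (illustrative numbers only): with
 * 64-byte contexts (CTX_SIZE(hcc_params) == 64), an *input* container
 * holds the input control context at offset 0, the slot context at offset
 * 64, and the endpoint context array from offset 128 on, so
 * mtktest_xhci_get_ep_ctx(xhci, ctx, 4) returns ctx->bytes + (4 + 2) * 64.
 * An *output* container has no input control context, so the same
 * ep_index maps to ctx->bytes + (4 + 1) * 64.
 */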
/***************** Streams structures manipulation *************************/

void mtktest_xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
/*
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
*/

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
/*
		pci_free_consistent(pdev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
*/
		dma_free_coherent(dev, sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		return dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}
/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
struct xhci_stream_ctx *mtktest_xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
/*
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
*/

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
/*
		return pci_alloc_consistent(pdev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma);
*/
		return dma_alloc_coherent(dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs, dma, GFP_ATOMIC);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}
struct xhci_ring *mtktest_xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> SEGMENT_SHIFT);
	return ep->ring;
}
/* Only use this when you know stream_info is valid */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static struct xhci_ring *dma_to_stream_ring(
		struct xhci_stream_info *stream_info,
		u64 address)
{
	return radix_tree_lookup(&stream_info->trb_address_map,
			address >> SEGMENT_SHIFT);
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
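
/*
 * Worked example for the radix tree lookups above (illustrative only):
 * with 1KB ring segments, SEGMENT_SHIFT is 10, so a TRB at DMA address
 * 0x10c912c0 is looked up under key 0x10c912c0 >> 10 == 0x43244 - the
 * same key under which the segment starting at 0x10c91000 was inserted
 * by mtktest_xhci_alloc_stream_info().  See the long comment above that
 * function for the full scheme.
 */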
struct xhci_ring *mtktest_xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
struct xhci_ring *mtktest_xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "slot_id %d, ep_index %d\n", slot_id, ep_index);
	ep = &xhci->devs[slot_id]->eps[ep_index];
	xhci_dbg(xhci, "ep->ep_state 0x%x\n", ep->ep_state);
	xhci_dbg(xhci, "ep->ring 0x%p\n", ep->ring);
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *mtktest_xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return mtktest_xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		mtktest_xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
		unsigned int num_streams,
		struct xhci_stream_info *stream_info)
{
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;

	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		struct xhci_ring *mapped_ring;
		int trb_size = sizeof(union xhci_trb);

		cur_ring = stream_info->stream_rings[cur_stream];
		for (addr = cur_ring->first_seg->dma;
				addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
				addr += trb_size) {
			mapped_ring = dma_to_stream_ring(stream_info, addr);
			if (cur_ring != mapped_ring) {
				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
						"didn't map to stream ID %u; "
						"mapped to ring %p\n",
						(unsigned long long) addr,
						cur_stream,
						mapped_ring);
				return -EINVAL;
			}
		}
		/* One TRB after the end of the ring segment shouldn't return a
		 * pointer to the current ring (although it may be a part of a
		 * different ring).
		 */
		mapped_ring = dma_to_stream_ring(stream_info, addr);
		if (mapped_ring != cur_ring) {
			/* One TRB before should also fail */
			addr = cur_ring->first_seg->dma - trb_size;
			mapped_ring = dma_to_stream_ring(stream_info, addr);
		}
		if (mapped_ring == cur_ring) {
			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
					"mapped to valid stream ID %u; "
					"mapped ring = %p\n",
					(unsigned long long) addr,
					cur_stream,
					mapped_ring);
			return -EINVAL;
		}
	}
	return 0;
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by device
 * drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 64-byte aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
 * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */
struct xhci_stream_info *mtktest_xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = mtktest_xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		mtktest_xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 1, true, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			mtktest_xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Do a little test on the radix tree to make sure it returns the
	 * correct values.
	 */
	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
		goto cleanup_rings;
#endif

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			mtktest_xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	mtktest_xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void mtktest_xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
	ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
	ep_ctx->ep_info |= EP_HAS_LSA;
	ep_ctx->deq = stream_info->ctx_array_dma;
}
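
/*
 * Worked example for the MaxPStreams math above (illustrative only):
 * num_stream_ctxs == 256 gives fls(256) == 9, so max_primary_streams == 7,
 * and the xHC decodes the array size back as 2^(7 + 1) == 256 entries.
 */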
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void mtktest_xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
	ep_ctx->ep_info &= ~EP_HAS_LSA;
	addr = mtktest_xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = addr | ep->ring->cycle_state;
}
/* Frees all stream contexts associated with the endpoint,
 *
 * Caller should fix the endpoint context streams fields.
 */
void mtktest_xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			mtktest_xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	mtktest_xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		mtktest_xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}
/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = mtktest_xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}
/* All the xhci_tds in the ring's TD list should be freed at this point */
void mtktest_xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	xhci_err(xhci, "mtktest_xhci_free_virt_device begin,slot_id is %d, dev is 0x%p\n", slot_id, xhci->devs[slot_id]);
	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			mtktest_xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			mtktest_xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
	}

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			mtktest_xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;

	xhci_err(xhci, "mtktest_xhci_free_virt_device done\n");
}
int mtktest_xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	xhci_err(xhci, "mtktest_xhci_alloc_virt_device begin\n");
	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

	xhci_err(xhci, "mtktest_xhci_alloc_virt_device done\n");
	return 1;

fail:
	xhci_err(xhci, "mtktest_xhci_alloc_virt_device FAIL\n");
	mtktest_xhci_free_virt_device(xhci, slot_id);
	return 0;
}
void mtktest_xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_ring	*ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = mtktest_xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = mtktest_xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
	ep0_ctx->deq |= ep_ring->cycle_state;
}
/* Setup an xHCI virtual device for a Set Address command */
int mtktest_xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct usb_device	*top_dev;
	struct xhci_slot_ctx	*slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = mtktest_xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = mtktest_xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = mtktest_xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid*/
	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info &= ~LAST_CTX_MASK;
	slot_ctx->dev_info |= LAST_CTX(1);

	slot_ctx->dev_info |= (u32) udev->route;
	xhci_dbg(xhci, "device route 0x%x\n", udev->route);
	xhci_dbg(xhci, "dev_info 0x%x\n", slot_ctx->dev_info);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		xhci_dbg(xhci, "BUG\n");
		break;
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		slot_ctx->tt_info = udev->tt->hub->slot_id;
		slot_ctx->tt_info |= udev->ttport << 8;
		if (udev->tt->multi)
			slot_ctx->dev_info |= DEV_MTT;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* New speed? */
		xhci_dbg(xhci, "BUG\n");
		break;
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq =
		dev->eps[0].ring->first_seg->dma;
	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;

	/* Steps 7 and 8 were done in mtktest_xhci_alloc_virt_device() */
	xhci_dbg(xhci, "slot_ctx 0x%x 0x%x 0x%x 0x%x\n", slot_ctx->dev_info, slot_ctx->dev_info2, slot_ctx->tt_info, slot_ctx->dev_state);
	xhci_dbg(xhci, "ep0_ctx 0x%x 0x%x %llx 0x%x\n", ep0_ctx->ep_info, ep0_ctx->ep_info2, ep0_ctx->deq, ep0_ctx->tx_info);
	xhci_dbg(xhci, "ctrl_ctx 0x%x 0x%x\n", ctrl_ctx->drop_flags, ctrl_ctx->add_flags);

	return 0;
}
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc))
			interval = ep->desc.bInterval;
		/* Fall through - SS and HS isoc/int have same decoding */
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			if (ep->desc.bInterval == 0)
				interval = 0;
			else
				interval = ep->desc.bInterval - 1;
			if (interval > 15)
				interval = 15;
			if (interval != ep->desc.bInterval + 1)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	/* Convert bInterval (in 1-255 frames) to microframes and round down to
	 * nearest power of 2.
	 */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = fls(8*ep->desc.bInterval) - 1;
			if (interval > 10)
				interval = 10;
			if (interval < 3)
				interval = 3;
			if ((1 << interval) != 8*ep->desc.bInterval)
				dev_warn(&udev->dev,
						"ep %#x - rounding interval"
						" to %d microframes, "
						"ep desc says %d microframes\n",
						ep->desc.bEndpointAddress,
						1 << interval,
						8*ep->desc.bInterval);
		}
		break;
	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}
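
/*
 * Worked examples for the decoding above (illustrative only):
 * - HS/SS periodic endpoint, bInterval == 4: Interval field == 3, so the
 *   endpoint is serviced every 2^3 * 125us == 1ms.
 * - FS interrupt endpoint, bInterval == 32 frames: fls(8 * 32) - 1 == 8,
 *   i.e. 2^8 * 125us == 32ms; 256 is an exact power of two, so no
 *   rounding warning is printed.
 */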
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and the interval.
 */
u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return ep->ss_ep_comp.wBytesPerInterval;

//	max_packet = ep->desc.wMaxPacketSize & 0x3ff;
	max_packet = ep->desc.wMaxPacketSize & 0x7ff;
	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
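
/*
 * Worked example for the high-speed case above (illustrative only):
 * wMaxPacketSize == 0x1400 decodes to max_packet == 0x400 (1024 bytes)
 * and max_burst == 2 (two extra transactions per microframe), so the max
 * ESIT payload is 1024 * (2 + 1) == 3072 bytes per service interval.
 */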
int mtktest_mtk_xhci_endpoint_scheduler_init(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		struct xhci_ep_ctx *ep_ctx)
{
	int bPkts = 0;
	int bCsCount = 0;
	int bBm = 0;
	int bOffset = 0;
	int bRepeat = 0;
	int interval = 0;

	xhci_dbg(xhci, "mtktest_mtk_xhci_endpoint_scheduler_init is called\n");
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc)) {
			bPkts = ep->ss_ep_comp.bMaxBurst+1;
		}
		else if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_get_endpoint_interval(udev, ep);
			bPkts = (ep->ss_ep_comp.bMaxBurst+1) * (ep->ss_ep_comp.bmAttributes+1);
		}
		else {
			bPkts = (ep->ss_ep_comp.bMaxBurst+1);
		}
		break;
	case USB_SPEED_HIGH:
		if (usb_endpoint_xfer_int(&ep->desc)) {
			bPkts = ((ep->desc.wMaxPacketSize & 0x1800) >> 11) + 1;
		}
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			bPkts = ((ep->desc.wMaxPacketSize & 0x1800) >> 11) + 1;
		}
		break;
	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_int(&ep->desc)) {
			bPkts = 1;
		}
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			bPkts = 1;
		}
		break;
	default:
		if (usb_endpoint_xfer_int(&ep->desc)) {
			bPkts = 1;
		}
		break;
	}
	ep_ctx->reserved[0] |= (BPKTS(bPkts) | BCSCOUNT(bCsCount) | BBM(bBm));
	ep_ctx->reserved[1] |= (BOFFSET(bOffset) | BREPEAT(bRepeat));

	return 0;
}
/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int mtktest_xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst = 0;
	u32 max_esit_payload;
	struct xhci_slot_ctx *slot_ctx;
	struct sch_ep *sch_ep;
	int isTT = 0;
	int ep_type = 0;
	int maxp = 0;
	int burst = 0;
	int mult = 0;
	int interval;

	ep_index = mtktest_xhci_get_endpoint_index(&ep->desc);
	ep_ctx = mtktest_xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached - 1];
		virt_dev->ring_cache[virt_dev->num_rings_cached - 1] = NULL;
		virt_dev->num_rings_cached--;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
	}
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * error count = 0 means infinite retries.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(0);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		max_packet = ep->ss_ep_comp.bMaxBurst;
		if (!max_packet)
			xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		/* high speed intr allow 1024 bytes, should not & 0x3ff */
		max_packet = ep->desc.wMaxPacketSize & 0x7ff;
		/* workaround for maxp size issue of mac */
		if ((max_packet % 4 == 2) && (max_packet % 16 != 14) &&
				(max_burst == 0) && usb_endpoint_dir_in(&ep->desc))
			max_packet += 2;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		BUG();
	}
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 */
	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);

	/* FIXME Debug endpoint context */
	/* MTK scheduler parameters */
	//mtktest_mtk_xhci_endpoint_scheduler_init(xhci, udev, ep, ep_ctx);

	slot_ctx = mtktest_xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if ((slot_ctx->tt_info & 0xff) > 0) {
		isTT = 1;
	}
	else {
		isTT = 0;
	}
	if (usb_endpoint_xfer_int(&ep->desc)) {
		ep_type = USB_EP_INT;
	}
	else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		ep_type = USB_EP_ISOC;
	}
	else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		ep_type = USB_EP_BULK;
	}
	if (udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH
			|| udev->speed == USB_SPEED_LOW) {
		maxp = ep->desc.wMaxPacketSize & 0x7FF;
		burst = ep->desc.wMaxPacketSize >> 11;
		mult = 0;
	}
	else if (udev->speed == USB_SPEED_SUPER) {
		maxp = ep->desc.wMaxPacketSize & 0x7FF;
		burst = ep->ss_ep_comp.bMaxBurst;
		mult = ep->ss_ep_comp.bmAttributes & 0x3;
	}
	interval = (1 << ((ep_ctx->ep_info >> 16) & 0xff));
	sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL);
	if (!sch_ep)
		return -ENOMEM;
	if (mtktest_mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc),
			isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep
			, (mtk_u32 *)ep_ctx, sch_ep) != SCH_SUCCESS) {
		xhci_err(xhci, "[MTK] not enough bandwidth\n");
		return -ENOSPC;
	}
/*
	mtktest_mtk_xhci_scheduler_add_ep(xhci, udev, ep, ep_ctx);
*/

	xhci_dbg(xhci, "Endpoint %02d Context: %#08x %#08x %#08llx %#08x %#08x %#08x %#08x\n"
			, ep_index, ep_ctx->ep_info, ep_ctx->ep_info2, ep_ctx->deq, ep_ctx->tx_info
			, ep_ctx->reserved[0], ep_ctx->reserved[1], ep_ctx->reserved[2]);

	return 0;
}
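
/*
 * Worked example for the MTK scheduler hand-off above (illustrative
 * values only): a high-speed isochronous IN endpoint with
 * wMaxPacketSize == 0x0c00 passes maxp == 0x400 and burst == 1 to
 * mtktest_mtk_xhci_scheduler_add_ep(), with mult == 0; interval is
 * rebuilt from the Interval field already encoded in ep_info as
 * 1 << ((ep_info >> 16) & 0xff).
 */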
void mtktest_xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = mtktest_xhci_get_endpoint_index(&ep->desc);
	ep_ctx = mtktest_xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void mtktest_xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = mtktest_xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = mtktest_xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void mtktest_xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = mtktest_xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = mtktest_xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array =
		pci_alloc_consistent(to_pci_dev(dev),
				num_sp * sizeof(u64),
				&xhci->scratchpad->sp_dma);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
/*
		void *buf = pci_alloc_consistent(to_pci_dev(dev),
				xhci->page_size, &dma);
*/
		void *buf = dma_alloc_coherent(dev,
				xhci->page_size, &dma, GFP_ATOMIC);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

fail_sp5:
	for (i = i - 1; i >= 0; i--) {
/*
		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
*/
		dma_free_coherent(dev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
/*
	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
*/

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
/*
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
*/

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
/*
		pci_free_consistent(pdev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
*/
		dma_free_coherent(dev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
/*
	pci_free_consistent(pdev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
*/
	dma_free_coherent(dev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
struct xhci_command *mtktest_xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
void mtktest_xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	int last;

	if (!urb_priv)
		return;

	last = urb_priv->length - 1;
	if (last >= 0) {
		int i;
		for (i = 0; i <= last; i++)
			kfree(urb_priv->td[i]);
	}
	kfree(urb_priv);
}
void mtktest_xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}
void mtktest_xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
/*
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
*/
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	if (xhci->ir_set) {
		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	}
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
/*
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
*/
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		mtktest_xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		mtktest_xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		mtktest_xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg(xhci, "Freed small stream array pool\n");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg(xhci, "Freed medium stream array pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
/*
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
*/
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);
	xhci->page_size = 0;
	xhci->page_shift = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = mtktest_xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = mtktest_xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = mtktest_trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}
	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}
int mtktest_xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.6 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev,
			sizeof(*xhci->dcbaa), &dma, GFP_KERNEL);
/*
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
*/
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with pci_alloc_consistent()
	 */
	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	mtktest_xhci_dbg_cmd_ptrs(xhci);

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	mtktest_xhci_dbg_regs(xhci);
	mtktest_xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;
/*
	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
*/
	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma, GFP_KERNEL);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	mtktest_xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	mtktest_xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;

	if (scratchpad_alloc(xhci, flags))
		goto fail;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	mtktest_xhci_mem_cleanup(xhci);
	return -ENOMEM;
}