return NULL;
}
-void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index)
{
- int rings_cached;
-
- rings_cached = virt_dev->num_rings_cached;
- if (rings_cached < XHCI_MAX_RINGS_CACHED) {
- virt_dev->ring_cache[rings_cached] =
- virt_dev->eps[ep_index].ring;
- virt_dev->num_rings_cached++;
- xhci_dbg(xhci, "Cached old ring, "
- "%d ring%s cached\n",
- virt_dev->num_rings_cached,
- (virt_dev->num_rings_cached > 1) ? "s" : "");
- } else {
- xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
- xhci_dbg(xhci, "Ring cache full (%d rings), "
- "freeing ring\n",
- virt_dev->num_rings_cached);
- }
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
virt_dev->eps[ep_index].ring = NULL;
}
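
Worth noting: xhci_ring_free() is NULL-tolerant, which is what makes the
unconditional call above safe even for an endpoint that never had a ring. A
simplified sketch of its shape (the in-tree version in xhci-mem.c additionally
handles stream rings):

	void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
	{
		if (!ring)		/* nothing allocated, nothing to free */
			return;
		if (ring->first_seg)
			xhci_free_segments_for_ring(xhci, ring->first_seg);
		kfree(ring);
	}
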
-/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
- * pointers to the beginning of the ring.
- */
-static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
- struct xhci_ring *ring, unsigned int cycle_state,
- enum xhci_ring_type type)
-{
- struct xhci_segment *seg = ring->first_seg;
- int i;
-
- do {
- memset(seg->trbs, 0,
- sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
- if (cycle_state == 0) {
- for (i = 0; i < TRBS_PER_SEGMENT; i++)
- seg->trbs[i].link.control |=
- cpu_to_le32(TRB_CYCLE);
- }
- /* All endpoint rings have link TRBs */
- xhci_link_segments(xhci, seg, seg->next, type);
- seg = seg->next;
- } while (seg != ring->first_seg);
- ring->type = type;
- xhci_initialize_ring_info(ring, cycle_state);
- /* td list should be empty since all URBs have been cancelled,
- * but just in case...
- */
- INIT_LIST_HEAD(&ring->td_list);
-}
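
Dropping xhci_reinit_cached_ring() is safe because every endpoint ring is now
freshly allocated by xhci_ring_alloc(), which already performs the equivalent
initialization. A condensed sketch of that allocation path (error handling
elided):

	ring = kzalloc(sizeof(*ring), flags);
	xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg,
			num_segs, cycle_state, type, max_packet, flags);
	/* only the event ring lacks a link TRB with the toggle bit */
	if (type != TYPE_EVENT)
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	xhci_initialize_ring_info(ring, cycle_state);
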
-
/*
* Expand an existing ring.
- * Look for a cached ring or allocate a new ring which has same segment numbers
- * and link the two rings.
+ * Allocate a new ring with the same number of segments and link the two rings.
*/
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags)
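
With the cache fallback gone, expansion always allocates a fresh segment chain
and splices it into the existing ring. Roughly (a condensed sketch; the in-tree
body over-allocates to at least double the ring and also updates the stream
segment mapping):

	unsigned int num_new_segs = DIV_ROUND_UP(num_trbs, TRBS_PER_SEGMENT - 1);
	struct xhci_segment *first, *last;

	if (xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs,
			ring->cycle_state, ring->type, ring->bounce_buf_len,
			flags))
		return -ENOMEM;
	xhci_link_rings(xhci, ring, first, last, num_new_segs);
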
/* If necessary, update the number of active TTs on this root port */
xhci_update_tt_active_eps(xhci, dev, old_active_eps);
- if (dev->ring_cache) {
- for (i = 0; i < dev->num_rings_cached; i++)
- xhci_ring_free(xhci, dev->ring_cache[i]);
- kfree(dev->ring_cache);
- }
-
if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx);
if (dev->out_ctx)
if (!dev->eps[0].ring)
goto fail;
- /* Allocate pointers to the ring cache */
- dev->ring_cache = kzalloc(
- sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
- flags);
- if (!dev->ring_cache)
- goto fail;
- dev->num_rings_cached = 0;
-
dev->udev = udev;
/* Point to output device context in dcbaa. */
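
For context, the step this comment introduces is the standard DCBAA hookup in
xhci_alloc_virt_device(), roughly:

	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
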
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
- if (!virt_dev->eps[ep_index].new_ring) {
- /* Attempt to use the ring cache */
- if (virt_dev->num_rings_cached == 0)
- return -ENOMEM;
- virt_dev->num_rings_cached--;
- virt_dev->eps[ep_index].new_ring =
- virt_dev->ring_cache[virt_dev->num_rings_cached];
- virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
- xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
- 1, ring_type);
- }
+ if (!virt_dev->eps[ep_index].new_ring)
+ return -ENOMEM;
+
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
if (xhci->quirks & XHCI_MTK_HOST) {
ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
if (ret < 0) {
- xhci_free_or_cache_endpoint_ring(xhci,
- virt_dev, ep_index);
+ xhci_free_endpoint_ring(xhci, virt_dev, ep_index);
return ret;
}
}
for (i = 1; i < 31; i++) {
if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
- xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ xhci_free_endpoint_ring(xhci, virt_dev, i);
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
}
}
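
The 1 << (i + 1) shift follows the input control context flag layout in
xhci.h: bit 0 is the slot context and bit 1 is endpoint 0, so endpoint index i
(1..30) is tracked by bit i + 1:

	#define SLOT_FLAG	(1 << 0)	/* slot context */
	#define EP0_FLAG	(1 << 1)	/* endpoint 0; ep index i is bit i + 1 */
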
xhci_zero_in_ctx(xhci, virt_dev);
/*
* Install any rings for completely new endpoints or changed endpoints,
- * and free or cache any old rings from changed endpoints.
+ * and free any old rings from changed endpoints.
*/
for (i = 1; i < 31; i++) {
if (!virt_dev->eps[i].new_ring)
continue;
- /* Only cache or free the old ring if it exists.
+ /* Only free the old ring if it exists.
* It may not if this is the first add of an endpoint.
*/
if (virt_dev->eps[i].ring) {
- xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ xhci_free_endpoint_ring(xhci, virt_dev, i);
}
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
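
In the in-tree loop, the hand-over is completed by clearing the staging
pointer on the next (context) line, so the installed ring cannot be freed
twice:

	virt_dev->eps[i].new_ring = NULL;
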
*
* Wait for the Reset Device command to finish. Remove all structures
* associated with the endpoints that were disabled. Clear the input device
- * structure? Cache the rings? Reset the control endpoint 0 max packet size?
+ * structure? Reset the control endpoint 0 max packet size?
*
* If the virt_dev to be reset does not exist or does not match the udev,
* it means the device is lost, possibly due to the xHC restore error and
spin_unlock_irqrestore(&xhci->lock, flags);
}
- /* Everything but endpoint 0 is disabled, so free or cache the rings. */
+ /* Everything but endpoint 0 is disabled, so free the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; i++) {
struct xhci_virt_ep *ep = &virt_dev->eps[i];
}
if (ep->ring) {
- xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ xhci_free_endpoint_ring(xhci, virt_dev, i);
last_freed_endpoint = i;
}
if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
struct xhci_container_ctx *out_ctx;
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
- /* Rings saved to ensure old alt settings can be re-instated */
- struct xhci_ring **ring_cache;
- int num_rings_cached;
-#define XHCI_MAX_RINGS_CACHED 31
struct xhci_virt_ep eps[31];
u8 fake_port;
u8 real_port;
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags);
-void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,