X-Git-Url: https://git.stricted.de/?p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git;a=blobdiff_plain;f=drivers%2Fusb%2Fhost%2Fxhci-mem.c;h=00b3b44d328f6f693733aef4d5600e914b4048b4;hp=fbf75e57628b72e0b7a74237d8a9610826b2ab93;hb=38b8911896108e42005e915eb1f1c4cc5151547b;hpb=554e6e9f816bdf08c2962e6f003f8298c935646b

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fbf75e57628b..00b3b44d328f 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -26,6 +26,8 @@
 #include
 #include "xhci.h"
+#include
+#include
 /*
  * Allocates a generic ring segment from the ring pool, sets the dma address,
  */
@@ -111,10 +113,12 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
 		/* Set chain bit for isoc rings on AMD 0.96 host */
+#ifndef CONFIG_MTK_XHCI
 		if (xhci_link_trb_quirk(xhci) ||
 				(type == TYPE_ISOC &&
 				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
 			val |= TRB_CHAIN;
+#endif
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
 }
@@ -369,6 +373,10 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci
 		ctx->size += CTX_SIZE(xhci->hcc_params);
 	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+	if (!ctx->bytes) {
+		kfree(ctx);
+		return NULL;
+	}
 	memset(ctx->bytes, 0, ctx->size);
 	return ctx;
 }
@@ -419,12 +427,16 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
 		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
-		dma_free_coherent(&pdev->dev,
-				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
-				stream_ctx, dma);
+		dma_free_coherent(dev,
+				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+#ifdef CONFIG_MTK_XHCI
+				xhci->erst.entries, xhci->erst.erst_dma_addr);
+#else
+				stream_ctx, dma);
+#endif
 	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
 		return dma_pool_free(xhci->small_streams_pool,
 				stream_ctx, dma);
@@ -447,12 +459,12 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs, dma_addr_t *dma,
 		gfp_t mem_flags)
 {
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
-		return dma_alloc_coherent(&pdev->dev,
-				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
-				dma, mem_flags);
+		return dma_alloc_coherent(dev,
+				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+				dma, mem_flags);
 	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
 		return dma_pool_alloc(xhci->small_streams_pool,
 				mem_flags, dma);
@@ -1396,10 +1408,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
 			return -ENOMEM;
+		virt_dev->num_rings_cached--;
 		virt_dev->eps[ep_index].new_ring =
 			virt_dev->ring_cache[virt_dev->num_rings_cached];
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-		virt_dev->num_rings_cached--;
 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
 					1, type);
 	}
@@ -1445,7 +1457,18 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		break;
 	case USB_SPEED_FULL:
 	case USB_SPEED_LOW:
+	{
+		CHIP_SW_VER sw_code = mt_get_chip_sw_ver();
+		unsigned int hw_code = mt_get_chip_hw_code();
+
+		if((hw_code == 0x6595) && (sw_code <= CHIP_SW_VER_01)){
+			/* workaround for maxp size issue of RXXE */
+			if((max_packet % 4 == 2) && (max_packet % 16 != 14) &&
+				(max_burst == 0) && usb_endpoint_dir_in(&ep->desc))
+				max_packet += 2;
+		}
 		break;
+	}
 	default:
 		BUG();
 	}
@@ -1682,7 +1705,7 @@ static void scratchpad_free(struct xhci_hcd *xhci)
 {
 	int num_sp;
 	int i;
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	if (!xhci->scratchpad)
 		return;
@@ -1690,13 +1713,13 @@ static void scratchpad_free(struct xhci_hcd *xhci)
 	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
 	for (i = 0; i < num_sp; i++) {
-		dma_free_coherent(&pdev->dev, xhci->page_size,
+		dma_free_coherent(dev, xhci->page_size,
 				    xhci->scratchpad->sp_buffers[i],
 				    xhci->scratchpad->sp_dma_buffers[i]);
 	}
 	kfree(xhci->scratchpad->sp_dma_buffers);
 	kfree(xhci->scratchpad->sp_buffers);
-	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
+	dma_free_coherent(dev, num_sp * sizeof(u64),
 			xhci->scratchpad->sp_array,
 			xhci->scratchpad->sp_dma);
 	kfree(xhci->scratchpad);
@@ -1758,7 +1781,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	struct dev_info *dev_info, *next;
 	struct xhci_cd *cur_cd, *next_cd;
 	unsigned long flags;
@@ -1768,7 +1791,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
-		dma_free_coherent(&pdev->dev, size,
+		dma_free_coherent(dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
 	xhci_dbg(xhci, "Freed ERST\n");
@@ -1790,6 +1813,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 		kfree(cur_cd);
 	}
+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
+		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+			struct list_head *ep = &bwt->interval_bw[j].endpoints;
+			while (!list_empty(ep))
+				list_del_init(ep->next);
+		}
+	}
+
 	for (i = 1; i < MAX_HC_SLOTS; ++i)
 		xhci_free_virt_device(xhci, i);
@@ -1814,7 +1847,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci_dbg(xhci, "Freed medium stream array pool\n");
 	if (xhci->dcbaa)
-		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
+		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
 	xhci->dcbaa = NULL;
@@ -1830,16 +1863,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (!xhci->rh_bw)
 		goto no_bw;
-	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-	for (i = 0; i < num_ports; i++) {
-		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
-		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
-			struct list_head *ep = &bwt->interval_bw[j].endpoints;
-			while (!list_empty(ep))
-				list_del_init(ep->next);
-		}
-	}
-
 	for (i = 0; i < num_ports; i++) {
 		struct xhci_tt_bw_info *tt, *n;
 		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
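
For reference, the maxp adjustment introduced in the USB_SPEED_FULL/USB_SPEED_LOW hunk above can be exercised in isolation. The sketch below is illustrative userspace C, not part of the patch: the adjust_maxp() helper and its parameters are hypothetical stand-ins for the chip gate (mt_get_chip_hw_code()/mt_get_chip_sw_ver()) and the usb_endpoint_dir_in() check used in the kernel code.

/*
 * Illustrative sketch (not part of the patch): reproduces the maxp
 * adjustment condition that the MT6595/CHIP_SW_VER_01 hunk applies to
 * full-/low-speed IN endpoints with max_burst == 0. The chip-version and
 * endpoint-direction checks are replaced by plain parameters here.
 */
#include <stdio.h>

/* Mirror of the RXXE workaround condition from the xhci_endpoint_init() hunk. */
static unsigned int adjust_maxp(unsigned int max_packet, unsigned int max_burst,
				int is_in_endpoint)
{
	if ((max_packet % 4 == 2) && (max_packet % 16 != 14) &&
	    (max_burst == 0) && is_in_endpoint)
		max_packet += 2;	/* bump to the next multiple of 4 */
	return max_packet;
}

int main(void)
{
	unsigned int maxp;

	/* Show which maxp values up to 64 would be bumped for an IN endpoint. */
	for (maxp = 0; maxp <= 64; maxp++) {
		unsigned int adjusted = adjust_maxp(maxp, 0, 1);
		if (adjusted != maxp)
			printf("maxp %u -> %u\n", maxp, adjusted);
	}
	return 0;
}

With these conditions, values such as 2, 6, 10, and 18 are rounded up to the next multiple of 4, while 14, 30, 46, and 62 (max_packet % 16 == 14) are deliberately left untouched.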