Merge tag 'v3.10.86' into update
GitHub/mt8127/android_kernel_alcatel_ttab.git: drivers/usb/host/xhci-ring.c
index 1969c001b3f9a8bacbe926dfc4777c95206101a1..11e6e52c0c97d4d10f7f11e45fb8c25dd7fbf216 100644
@@ -122,6 +122,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
        return TRB_TYPE_LINK_LE32(link->control);
 }
 
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+       /* The enqueue pointer can be left pointing to the link TRB;
+        * we must handle that case.
+        */
+       if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+               return ring->enq_seg->next->trbs;
+       return ring->enqueue;
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  * affect the ring dequeue or enqueue pointers.
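
For readers unfamiliar with how xHCI rings chain segments together, here is a minimal user-space model of the check that the new xhci_find_next_enqueue() above performs; struct toy_trb, toy_seg and toy_ring are illustrative stand-ins, not the kernel's xhci_segment/xhci_ring definitions:

#include <stdio.h>

#define SEG_TRBS 4

struct toy_trb { int is_link; };
struct toy_seg { struct toy_trb trbs[SEG_TRBS]; struct toy_seg *next; };
struct toy_ring { struct toy_trb *enqueue; struct toy_seg *enq_seg; };

/* Same idea as xhci_find_next_enqueue(): if the enqueue pointer was left
 * sitting on the link TRB at the end of a segment, the next usable TRB is
 * the first TRB of the following segment.
 */
static struct toy_trb *find_next_enqueue(struct toy_ring *ring)
{
        if (ring->enqueue->is_link)
                return ring->enq_seg->next->trbs;
        return ring->enqueue;
}

int main(void)
{
        struct toy_seg seg_b = { { { 0 } }, NULL };
        struct toy_seg seg_a = { { { 0 }, { 0 }, { 0 }, { 1 } }, &seg_b };
        struct toy_ring ring = { &seg_a.trbs[3], &seg_a };

        printf("wraps to next segment: %d\n",
               find_next_enqueue(&ring) == &seg_b.trbs[0]);
        return 0;
}

In the mainline series this helper comes from, callers use it to record which TRB a queued command will occupy before the command is actually written to the ring.
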
@@ -232,9 +242,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                         * carry over the chain bit of the previous TRB
                         * (which may mean the chain bit is cleared).
                         */
+                       #ifdef CONFIG_MTK_XHCI
+                       if (!xhci_link_trb_quirk(xhci)) {
+                       #else
                        if (!(ring->type == TYPE_ISOC &&
                                        (xhci->quirks & XHCI_AMD_0x96_HOST))
                                                && !xhci_link_trb_quirk(xhci)) {
+                       #endif
                                next->link.control &=
                                        cpu_to_le32(~TRB_CHAIN);
                                next->link.control |=
@@ -263,16 +277,20 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
 {
+#ifndef CONFIG_MTK_XHCI
        int num_trbs_in_deq_seg;
+#endif
 
        if (ring->num_trbs_free < num_trbs)
                return 0;
 
+#ifndef CONFIG_MTK_XHCI
        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
                        return 0;
        }
+#endif
 
        return 1;
 }
@@ -434,7 +452,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 
        /* A ring has pending URBs if its TD list is not empty */
        if (!(ep->ep_state & EP_HAS_STREAMS)) {
-               if (!(list_empty(&ep->ring->td_list)))
+               if (ep->ring && !(list_empty(&ep->ring->td_list)))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
                return;
        }
@@ -720,10 +738,12 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
        if (urb_priv->td_cnt == urb_priv->length) {
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+                       #ifndef CONFIG_MTK_XHCI
                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                        usb_amd_quirk_pll_enable();
                        }
+                       #endif
                }
                usb_hcd_unlink_urb_from_ep(hcd, urb);
 
@@ -847,8 +867,12 @@ remove_finished_td:
                /* Otherwise ring the doorbell(s) to restart queued transfers */
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
-       ep->stopped_td = NULL;
-       ep->stopped_trb = NULL;
+
+       /* Clear stopped_td and stopped_trb if endpoint is not halted */
+       if (!(ep->ep_state & EP_HALTED)) {
+               ep->stopped_td = NULL;
+               ep->stopped_trb = NULL;
+       }
 
        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1164,9 +1188,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
-               /* Clear our internal halted state and restart the ring(s) */
+               /* Clear our internal halted state */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
-               ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
 }
 
@@ -1390,6 +1413,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                        inc_deq(xhci, xhci->cmd_ring);
                        return;
                }
+               /* There is no command to handle if we get a stop event when the
+                * command ring is empty; event->cmd_trb points to the next
+                * unset command.
+                */
+               if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+                       return;
        }
 
        switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1650,6 +1679,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
                usb_hcd_resume_root_hub(hcd);
        }
 
+       if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+               bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
 
@@ -1678,7 +1710,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                } else {
                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
                        bus_state->resume_done[faked_port_index] = jiffies +
-                               msecs_to_jiffies(20);
+                               msecs_to_jiffies(USB_RESUME_TIMEOUT);
                        set_bit(faked_port_index, &bus_state->resuming_ports);
                        mod_timer(&hcd->rh_timer,
                                  bus_state->resume_done[faked_port_index]);
@@ -1960,11 +1992,13 @@ td_cleanup:
                        ret = 1;
                        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+                               #ifndef CONFIG_MTK_XHCI
                                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
                                        == 0) {
                                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                                usb_amd_quirk_pll_enable();
                                }
+                               #endif
                        }
                }
        }
@@ -2045,7 +2079,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
        if (event_trb != ep_ring->dequeue) {
                /* The event was for the status stage */
                if (event_trb == td->last_trb) {
-                       if (td->urb->actual_length != 0) {
+                       if (td->urb_length_set) {
                                /* Don't overwrite a previously set error code
                                 */
                                if ((*status == -EINPROGRESS || *status == 0) &&
@@ -2059,7 +2093,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                                        td->urb->transfer_buffer_length;
                        }
                } else {
-               /* Maybe the event was for the data stage? */
+                       /*
+                        * Maybe the event was for the data stage? If so,
+                        * update the URB's actual_length now and flag it as
+                        * set, so that it is not overwritten by the event
+                        * for the last TRB.
+                        */
+                       td->urb_length_set = true;
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
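
The urb_length_set handling above exists so that a short data-stage completion is not clobbered when the status-stage event arrives. A small user-space sketch of that two-event sequence, using toy types rather than the kernel's struct urb and struct xhci_td (field and function names here are illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct toy_td {
        unsigned int transfer_buffer_length;
        unsigned int actual_length;
        bool length_set;                /* mirrors td->urb_length_set */
};

/* Data-stage event: record the (possibly short) length and mark it set. */
static void data_stage_event(struct toy_td *td, unsigned int bytes_untransferred)
{
        td->length_set = true;
        td->actual_length = td->transfer_buffer_length - bytes_untransferred;
}

/* Status-stage event: only assume a full transfer if no length was set. */
static void status_stage_event(struct toy_td *td)
{
        if (!td->length_set)
                td->actual_length = td->transfer_buffer_length;
}

int main(void)
{
        struct toy_td td = { .transfer_buffer_length = 512 };

        data_stage_event(&td, 128);     /* 128 bytes left untransferred */
        status_stage_event(&td);
        printf("actual_length = %u\n", td.actual_length);   /* 384, not 512 */
        return 0;
}
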
@@ -2119,8 +2159,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                break;
        case COMP_DEV_ERR:
        case COMP_STALL:
+               frame->status = -EPROTO;
+               skip_td = true;
+               break;
        case COMP_TX_ERR:
                frame->status = -EPROTO;
+               if (event_trb != td->last_trb)
+                       return 0;
                skip_td = true;
                break;
        case COMP_STOP:
@@ -2512,7 +2557,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 * last TRB of the previous TD. The command completion handler
                 * will take care of the rest.
                 */
-               if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+               if (!event_seg && (trb_comp_code == COMP_STOP ||
+                                  trb_comp_code == COMP_STOP_INVAL)) {
                        ret = 0;
                        goto cleanup;
                }
@@ -2524,7 +2570,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                 * successful event after a short transfer.
                                 * Ignore it.
                                 */
-                               if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
+                               if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
                                                ep_ring->last_td_was_short) {
                                        ep_ring->last_td_was_short = false;
                                        ret = 0;
@@ -2736,7 +2782,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
                xhci_halt(xhci);
 hw_died:
                spin_unlock(&xhci->lock);
-               return -ESHUTDOWN;
+               return IRQ_HANDLED;
        }
 
        /*
@@ -2893,13 +2939,16 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                        /* If we're not dealing with 0.95 hardware or isoc rings
                         * on AMD 0.96 host, clear the chain bit.
                         */
+                       #ifndef CONFIG_MTK_XHCI
                        if (!xhci_link_trb_quirk(xhci) &&
                                        !(ring->type == TYPE_ISOC &&
                                         (xhci->quirks & XHCI_AMD_0x96_HOST)))
                                next->link.control &= cpu_to_le32(~TRB_CHAIN);
                        else
                                next->link.control |= cpu_to_le32(TRB_CHAIN);
-
+                       #else
+                       next->link.control &= cpu_to_le32(~TRB_CHAIN);
+                       #endif
                        wmb();
                        next->link.control ^= cpu_to_le32(TRB_CYCLE);
 
@@ -3078,6 +3127,29 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  * right shifted by 10.
  * It must fit in bits 21:17, so it can't be bigger than 31.
  */
+#ifdef CONFIG_MTK_XHCI
+static u32 xhci_td_remainder(unsigned int td_transfer_size,
+       unsigned int td_running_total, unsigned int maxp,
+       unsigned int trb_buffer_length)
+{
+       u32 max = 31;
+       int remainder, td_packet_count, packet_transferred;
+
+       /* 0 for the last TRB */
+       /* FIXME: need a workaround if there is a ZLP in this TD */
+       if (td_running_total + trb_buffer_length == td_transfer_size)
+               return 0;
+
+       /* FIXME: need to take care of high-bandwidth (MAX_ESIT) */
+       packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp;
+       td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
+       remainder = td_packet_count - packet_transferred;
+
+       if (remainder > max)
+               return max << 17;
+       else
+               return remainder << 17;
+}
+#else
 static u32 xhci_td_remainder(unsigned int remainder)
 {
        u32 max = (1 << (21 - 17 + 1)) - 1;
@@ -3087,7 +3159,10 @@ static u32 xhci_td_remainder(unsigned int remainder)
        else
                return (remainder >> 10) << 17;
 }
+#endif
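
As the comment above notes, the pre-1.0 TD size field is simply the bytes remaining in the TD divided by 1024, clamped to 31 and placed in bits 21:17. A standalone arithmetic check of that encoding, mirroring the legacy xhci_td_remainder() above (example values chosen arbitrarily):

#include <stdio.h>

/* Pre-xHCI-1.0 TD size field: bytes remaining / 1024, clamped to 31,
 * stored in bits 21:17 of the TRB length/status field.
 */
static unsigned int td_remainder_pre_1_0(unsigned int remainder)
{
        unsigned int max = (1 << (21 - 17 + 1)) - 1;    /* 31 */

        if ((remainder >> 10) >= max)
                return max << 17;
        return (remainder >> 10) << 17;
}

int main(void)
{
        /* 40960 bytes left -> 40 KiB blocks -> clamped to 31 */
        printf("0x%x\n", td_remainder_pre_1_0(40960));  /* 0x3e0000 = 31 << 17 */
        /* 3000 bytes left -> 2 full KiB blocks */
        printf("0x%x\n", td_remainder_pre_1_0(3000));   /* 0x40000  =  2 << 17 */
        return 0;
}
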
+
 
+#ifndef CONFIG_MTK_XHCI
 /*
  * For xHCI 1.0 host controllers, TD size is the number of max packet sized
  * packets remaining in the TD (*not* including this TRB).
@@ -3123,6 +3198,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
                return 31 << 17;
        return (total_packet_count - packets_transferred) << 17;
 }
+#endif
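
The xHCI 1.0 variant above instead counts max-packet-sized packets still to be sent after the current TRB, again clamped to 31; the MTK xhci_td_remainder() earlier in this patch follows the same packets-remaining idea without the host-version check. A standalone rework of that calculation with made-up numbers (the is_last_trb flag stands in for the num_trbs_left == 0 case in the kernel function):

#include <stdio.h>

/* xHCI 1.0 TD size: packets remaining in the TD after this TRB, clamped
 * to 31, in bits 21:17. total_packet_count is DIV_ROUND_UP(td_len, maxp).
 */
static unsigned int td_remainder_1_0(unsigned int running_total,
                                     unsigned int trb_buff_len,
                                     unsigned int total_packet_count,
                                     unsigned int max_packet,
                                     int is_last_trb)
{
        unsigned int packets_transferred;

        if (is_last_trb)
                return 0;       /* nothing left after the last TRB */

        packets_transferred = (running_total + trb_buff_len) / max_packet;
        if (total_packet_count - packets_transferred > 31)
                return 31 << 17;
        return (total_packet_count - packets_transferred) << 17;
}

int main(void)
{
        /* 3072-byte TD with 512-byte packets -> 6 packets total.
         * After the first 1024-byte TRB, 4 packets remain.
         */
        printf("%u\n", td_remainder_1_0(0, 1024, 6, 512, 0) >> 17);     /* 4 */
        return 0;
}
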
 
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3224,6 +3300,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                        (unsigned int) addr + trb_buff_len);
                }
 
+               /* Set the TRB length, TD size, and interrupter fields. */
+               #ifdef CONFIG_MTK_XHCI
+               if (num_trbs > 1) {
+                       remainder = xhci_td_remainder(urb->transfer_buffer_length,
+                               running_total, urb->ep->desc.wMaxPacketSize, trb_buff_len);
+               }
+               #else
                /* Set the TRB length, TD size, and interrupter fields. */
                if (xhci->hci_version < 0x100) {
                        remainder = xhci_td_remainder(
@@ -3234,6 +3317,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                        trb_buff_len, total_packet_count, urb,
                                        num_trbs - 1);
                }
+               #endif
+
                length_field = TRB_LEN(trb_buff_len) |
                        remainder |
                        TRB_INTR_TARGET(0);
@@ -3292,7 +3377,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        bool more_trbs_coming;
        int start_cycle;
        u32 field, length_field;
-
+#ifdef CONFIG_MTK_XHCI
+       int max_packet = 0;
+#endif
        int running_total, trb_buff_len, ret;
        unsigned int total_packet_count;
        u64 addr;
@@ -3322,6 +3409,24 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        }
        /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
+#ifdef CONFIG_MTK_XHCI
+       switch (urb->dev->speed) {
+               case USB_SPEED_SUPER:
+                       max_packet = urb->ep->desc.wMaxPacketSize;
+                       break;
+               case USB_SPEED_HIGH:
+               case USB_SPEED_FULL:
+               case USB_SPEED_LOW:
+               default:
+                       max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
+                       break;
+       }
+       if ((urb->transfer_flags & URB_ZERO_PACKET) &&
+               ((urb->transfer_buffer_length % max_packet) == 0)) {
+               num_trbs++;
+       }
+#endif
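
The block above derives max_packet from wMaxPacketSize (masking to bits 10:0 for non-SuperSpeed endpoints, since the upper bits encode additional high-bandwidth transactions) and reserves one extra TRB when URB_ZERO_PACKET is set and the length is an exact multiple of the packet size. A standalone check of that condition; needs_zlp_trb() and its parameters are illustrative, not kernel API:

#include <stdio.h>

/* Returns 1 if a transfer needs a trailing zero-length TRB: the caller
 * asked for URB_ZERO_PACKET and the length is an exact multiple of the
 * endpoint's max packet size.
 */
static int needs_zlp_trb(unsigned int transfer_len, unsigned int wMaxPacketSize,
                         int is_superspeed, int urb_zero_packet)
{
        unsigned int max_packet;

        /* Bits 10:0 carry the packet size; the bits above them are used
         * for extra per-microframe transactions on high-bandwidth
         * periodic endpoints.
         */
        max_packet = is_superspeed ? wMaxPacketSize : (wMaxPacketSize & 0x7ff);
        return urb_zero_packet && (transfer_len % max_packet) == 0;
}

int main(void)
{
        printf("%d\n", needs_zlp_trb(1024, 512, 0, 1));  /* 1: extra TRB needed */
        printf("%d\n", needs_zlp_trb(1000, 512, 0, 1));  /* 0: last TRB is short */
        return 0;
}
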
+
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);
@@ -3378,7 +3483,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                /* Only set interrupt on short packet for IN endpoints */
                if (usb_urb_dir_in(urb))
                        field |= TRB_ISP;
-
+               #ifdef CONFIG_MTK_XHCI
+               remainder = xhci_td_remainder(urb->transfer_buffer_length,
+                               running_total, max_packet, trb_buff_len);
+               #else
                /* Set the TRB length, TD size, and interrupter fields. */
                if (xhci->hci_version < 0x100) {
                        remainder = xhci_td_remainder(
@@ -3389,6 +3496,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                        trb_buff_len, total_packet_count, urb,
                                        num_trbs - 1);
                }
+               #endif
                length_field = TRB_LEN(trb_buff_len) |
                        remainder |
                        TRB_INTR_TARGET(0);
@@ -3478,7 +3586,11 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                field |= 0x1;
 
        /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+#ifdef CONFIG_MTK_XHCI
+       if (1) {
+#else
        if (xhci->hci_version == 0x100) {
+#endif
                if (urb->transfer_buffer_length > 0) {
                        if (setup->bRequestType & USB_DIR_IN)
                                field |= TRB_TX_TYPE(TRB_DATA_IN);
@@ -3502,7 +3614,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                field = TRB_TYPE(TRB_DATA);
 
        length_field = TRB_LEN(urb->transfer_buffer_length) |
+       #ifdef CONFIG_MTK_XHCI
+               /* CC: MTK style, no scatter-gather for control transfer */
+               0 |
+       #else
                xhci_td_remainder(urb->transfer_buffer_length) |
+       #endif
                TRB_INTR_TARGET(0);
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
@@ -3570,7 +3687,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
                return 0;
 
        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
-       return roundup(total_packet_count, max_burst + 1) - 1;
+       return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
 }
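
The roundup() to DIV_ROUND_UP() change above is easy to sanity-check numerically: roundup() yields the packet count rounded up to a multiple of the burst size, not the number of bursts. A quick user-space comparison (the macro definitions mirror the kernel's; the values are arbitrary):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define roundup(x, y)           ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        unsigned int total_packet_count = 4;
        unsigned int max_burst = 3;     /* bursts of max_burst + 1 = 4 packets */

        /* Old code: packet count rounded up to a burst multiple, minus one. */
        printf("roundup:      %u\n", roundup(total_packet_count, max_burst + 1) - 1);

        /* Fixed code: number of bursts, minus one (the field is zero-based). */
        printf("DIV_ROUND_UP: %u\n", DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1);
        return 0;
}

This prints 3 for the old expression and 0 for the fixed one; a single burst of four packets should indeed report a zero-based burst count of 0.
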
 
 /*
@@ -3625,6 +3742,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        u64 start_addr, addr;
        int i, j;
        bool more_trbs_coming;
+#ifdef CONFIG_MTK_XHCI
+       int max_packet = 0;
+#endif
 
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
@@ -3638,6 +3758,19 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        start_trb = &ep_ring->enqueue->generic;
        start_cycle = ep_ring->cycle_state;
 
+#ifdef CONFIG_MTK_XHCI
+       switch (urb->dev->speed) {
+               case USB_SPEED_SUPER:
+                       max_packet = urb->ep->desc.wMaxPacketSize;
+                       break;
+               case USB_SPEED_HIGH:
+               case USB_SPEED_FULL:
+               case USB_SPEED_LOW:
+               default:
+                       max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
+                       break;
+       }
+#endif
        urb_priv = urb->hcpriv;
        /* Queue the first TRB, even if it's zero-length */
        for (i = 0; i < num_tds; i++) {
@@ -3726,6 +3859,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                trb_buff_len = td_remain_len;
 
                        /* Set the TRB length, TD size, & interrupter fields. */
+                       #ifdef CONFIG_MTK_XHCI
+                       remainder = xhci_td_remainder(urb->transfer_buffer_length,
+                                       running_total, max_packet, trb_buff_len);
+                       #else
                        if (xhci->hci_version < 0x100) {
                                remainder = xhci_td_remainder(
                                                td_len - running_total);
@@ -3735,6 +3871,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                                total_packet_count, urb,
                                                (trbs_per_td - j - 1));
                        }
+                       #endif
                        length_field = TRB_LEN(trb_buff_len) |
                                remainder |
                                TRB_INTR_TARGET(0);
@@ -3757,11 +3894,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        goto cleanup;
                }
        }
-
+       #ifndef CONFIG_MTK_XHCI
        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                        usb_amd_quirk_pll_disable();
        }
+       #endif
        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
 
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,