return TRB_TYPE_LINK_LE32(link->control);
}
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+ /* The enqueue pointer may be left resting on a link TRB at the end
+ * of a segment; skip over it to the first TRB of the next segment.
+ */
+ if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+ return ring->enq_seg->next->trbs;
+ return ring->enqueue;
+}
+
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* effect the ring dequeue or enqueue pointers.
* carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
+ #ifdef CONFIG_MTK_XHCI
+ if (!xhci_link_trb_quirk(xhci)) {
+ #else
if (!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST))
&& !xhci_link_trb_quirk(xhci)) {
+ #endif
next->link.control &=
cpu_to_le32(~TRB_CHAIN);
next->link.control |=
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs)
{
+/* Return 1 if the ring has room for num_trbs more TRBs, 0 otherwise. */
+#ifndef CONFIG_MTK_XHCI
int num_trbs_in_deq_seg;
+#endif
if (ring->num_trbs_free < num_trbs)
return 0;
+/* NOTE(review): under CONFIG_MTK_XHCI the extra headroom check below is
+ * skipped entirely -- presumably the MTK controller tolerates the enqueue
+ * pointer entering the dequeue segment; confirm against MTK hardware docs.
+ */
+#ifndef CONFIG_MTK_XHCI
if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
return 0;
}
+#endif
return 1;
}
/* A ring has pending URBs if its TD list is not empty */
if (!(ep->ep_state & EP_HAS_STREAMS)) {
- if (!(list_empty(&ep->ring->td_list)))
+ if (ep->ring && !(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
return;
}
if (urb_priv->td_cnt == urb_priv->length) {
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ #ifndef CONFIG_MTK_XHCI
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
+ #endif
}
usb_hcd_unlink_urb_from_ep(hcd, urb);
/* Otherwise ring the doorbell(s) to restart queued transfers */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
+
+ /* Clear stopped_td and stopped_trb if endpoint is not halted */
+ if (!(ep->ep_state & EP_HALTED)) {
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+ }
/*
* Drop the lock and complete the URBs in the cancelled TD list.
false);
xhci_ring_cmd_db(xhci);
} else {
- /* Clear our internal halted state and restart the ring(s) */
+ /* Clear our internal halted state */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
}
inc_deq(xhci, xhci->cmd_ring);
return;
}
+ /* There is no command to handle if we get a stop event when the
+ * command ring is empty; in that case event->cmd_trb points to the
+ * next, not-yet-queued command slot.
+ */
+ if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+ return;
}
switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
usb_hcd_resume_root_hub(hcd);
}
+ if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+ bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
bus_state->resume_done[faked_port_index] = jiffies +
- msecs_to_jiffies(20);
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(faked_port_index, &bus_state->resuming_ports);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[faked_port_index]);
ret = 1;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ #ifndef CONFIG_MTK_XHCI
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
== 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
+ #endif
}
}
}
if (event_trb != ep_ring->dequeue) {
/* The event was for the status stage */
if (event_trb == td->last_trb) {
- if (td->urb->actual_length != 0) {
+ if (td->urb_length_set) {
/* Don't overwrite a previously set error code
*/
if ((*status == -EINPROGRESS || *status == 0) &&
td->urb->transfer_buffer_length;
}
} else {
- /* Maybe the event was for the data stage? */
+ /*
+ * Maybe the event was for the data stage? If so, update
+ * already the actual_length of the URB and flag it as
+ * set, so that it is not overwritten in the event for
+ * the last TRB.
+ */
+ td->urb_length_set = true;
td->urb->actual_length =
td->urb->transfer_buffer_length -
EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
break;
case COMP_DEV_ERR:
case COMP_STALL:
+ frame->status = -EPROTO;
+ skip_td = true;
+ break;
case COMP_TX_ERR:
frame->status = -EPROTO;
+ if (event_trb != td->last_trb)
+ return 0;
skip_td = true;
break;
case COMP_STOP:
* last TRB of the previous TD. The command completion handle
* will take care the rest.
*/
- if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+ if (!event_seg && (trb_comp_code == COMP_STOP ||
+ trb_comp_code == COMP_STOP_INVAL)) {
ret = 0;
goto cleanup;
}
* successful event after a short transfer.
* Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
ret = 0;
xhci_halt(xhci);
hw_died:
spin_unlock(&xhci->lock);
- return -ESHUTDOWN;
+ return IRQ_HANDLED;
}
/*
/* If we're not dealing with 0.95 hardware or isoc rings
* on AMD 0.96 host, clear the chain bit.
*/
+ #ifndef CONFIG_MTK_XHCI
if (!xhci_link_trb_quirk(xhci) &&
!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
next->link.control &= cpu_to_le32(~TRB_CHAIN);
else
next->link.control |= cpu_to_le32(TRB_CHAIN);
-
+ #else
+ next->link.control &= cpu_to_le32(~TRB_CHAIN);
+ #endif
wmb();
next->link.control ^= cpu_to_le32(TRB_CYCLE);
* right shifted by 10.
* It must fit in bits 21:17, so it can't be bigger than 31.
*/
+#ifdef CONFIG_MTK_XHCI
+/* MTK variant of the TD Size field: the number of packets remaining in
+ * the TD after this TRB, shifted into bits 21:17 and capped at 31.
+ * Returns 0 for the last TRB of the TD.
+ */
+static u32 xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total
+ , unsigned int maxp, unsigned trb_buffer_length)
+{
+ u32 max = 31;
+ int remainder, td_packet_count, packet_transferred;
+
+ /* TD Size is 0 for the last TRB of the TD.
+ * FIXME: this is wrong when the TD ends in a zero-length packet.
+ */
+ if (td_running_total + trb_buffer_length == td_transfer_size)
+ return 0;
+
+ /* FIXME: does not account for high-bandwidth (MAX_ESIT) endpoints */
+ packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp;
+ td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
+ remainder = td_packet_count - packet_transferred;
+
+ if (remainder > max)
+ return max << 17;
+ else
+ return remainder << 17;
+}
+#else
static u32 xhci_td_remainder(unsigned int remainder)
{
u32 max = (1 << (21 - 17 + 1)) - 1;
else
return (remainder >> 10) << 17;
}
+#endif
+
+#ifndef CONFIG_MTK_XHCI
/*
* For xHCI 1.0 host controllers, TD size is the number of max packet sized
* packets remaining in the TD (*not* including this TRB).
return 31 << 17;
return (total_packet_count - packets_transferred) << 17;
}
+#endif
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
(unsigned int) addr + trb_buff_len);
}
+ /* Set the TRB length, TD size, and interrupter fields. */
+ #ifdef CONFIG_MTK_XHCI
+ if(num_trbs >1){
+ remainder = xhci_td_remainder(urb->transfer_buffer_length,
+ running_total, urb->ep->desc.wMaxPacketSize, trb_buff_len);
+ }
+ #else
/* Set the TRB length, TD size, and interrupter fields. */
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
trb_buff_len, total_packet_count, urb,
num_trbs - 1);
}
+ #endif
+
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
bool more_trbs_coming;
int start_cycle;
u32 field, length_field;
-
+#ifdef CONFIG_MTK_XHCI
+ int max_packet = USB_SPEED_HIGH;
+#endif
int running_total, trb_buff_len, ret;
unsigned int total_packet_count;
u64 addr;
}
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+#ifdef CONFIG_MTK_XHCI
+ switch(urb->dev->speed){
+ case USB_SPEED_SUPER:
+ max_packet = urb->ep->desc.wMaxPacketSize;
+ break;
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ default:
+ max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
+ break;
+ }
+ if((urb->transfer_flags & URB_ZERO_PACKET)
+ && ((urb->transfer_buffer_length % max_packet) == 0)){
+ num_trbs++;
+ }
+#endif
+
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
-
+ #ifdef CONFIG_MTK_XHCI
+ remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
+ #else
/* Set the TRB length, TD size, and interrupter fields. */
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
trb_buff_len, total_packet_count, urb,
num_trbs - 1);
}
+ #endif
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
field |= 0x1;
/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+#ifdef CONFIG_MTK_XHCI
+ if(1){
+#else
if (xhci->hci_version == 0x100) {
+#endif
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
field = TRB_TYPE(TRB_DATA);
length_field = TRB_LEN(urb->transfer_buffer_length) |
+ #ifdef CONFIG_MTK_XHCI
+ //CC: MTK style, no scatter-gather for control transfer
+ 0 |
+ #else
xhci_td_remainder(urb->transfer_buffer_length) |
+ #endif
TRB_INTR_TARGET(0);
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
- return roundup(total_packet_count, max_burst + 1) - 1;
+ return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
/*
u64 start_addr, addr;
int i, j;
bool more_trbs_coming;
+#ifdef CONFIG_MTK_XHCI
+ int max_packet = USB_SPEED_HIGH;
+#endif
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
+#ifdef CONFIG_MTK_XHCI
+ switch(urb->dev->speed){
+ case USB_SPEED_SUPER:
+ max_packet = urb->ep->desc.wMaxPacketSize;
+ break;
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ default:
+ max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
+ break;
+ }
+#endif
urb_priv = urb->hcpriv;
/* Queue the first TRB, even if it's zero-length */
for (i = 0; i < num_tds; i++) {
trb_buff_len = td_remain_len;
/* Set the TRB length, TD size, & interrupter fields. */
+ #ifdef CONFIG_MTK_XHCI
+ remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
+ #else
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
td_len - running_total);
total_packet_count, urb,
(trbs_per_td - j - 1));
}
+ #endif
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
goto cleanup;
}
}
-
+ #ifndef CONFIG_MTK_XHCI
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_disable();
}
+ #endif
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,