Merge tag 'v3.10.86' into update
author Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:47:17 +0000 (22:47 +0100)
committer Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:47:17 +0000 (22:47 +0100)
This is the 3.10.86 stable release

Makefile
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/storage/unusual_devs.h
mm/memory.c

diff --combined Makefile
index 33d047b338008cf4ead3b02778d68cd5602c827b,25ee724c9089465e5549006eebe9852255f36eb9..8950dd7ebd85c5ca7e57d505ffd713b66b10b0b1
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 85
+ SUBLEVEL = 86
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -374,7 -374,7 +374,7 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
                   -fno-delete-null-pointer-checks \
 -                 -std=gnu89
 +                 -w -std=gnu89
  
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
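
The branch-local change kept through this merge is the extra -w in KBUILD_CFLAGS: GCC's -w inhibits every warning message, including everything -Wall and the other warning options earlier in the same assignment would enable. A minimal sketch of the effect; demo() and its unused variable are invented for illustration, not taken from the tree:

        /* Compiled with "-Wall ... -w": no diagnostics are printed, although
         * -Wall alone would warn about both the unused variable and the
         * unused static function. */
        static int demo(void)
        {
                int unused;

                return 0;
        }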
diff --combined drivers/usb/host/xhci-ring.c
index 41b0d4855169374e1820ad66c33a972b8bdbf6c7,95fe1a432d29229056523a17ad003cf36e966ae0..11e6e52c0c97d4d10f7f11e45fb8c25dd7fbf216
@@@ -242,13 -242,9 +242,13 @@@ static void inc_enq(struct xhci_hcd *xh
                         * carry over the chain bit of the previous TRB
                         * (which may mean the chain bit is cleared).
                         */
 +                      #ifdef CONFIG_MTK_XHCI
 +                      if (!xhci_link_trb_quirk(xhci)) {
 +                      #else
                        if (!(ring->type == TYPE_ISOC &&
                                        (xhci->quirks & XHCI_AMD_0x96_HOST))
                                                && !xhci_link_trb_quirk(xhci)) {
 +                      #endif
                                next->link.control &=
                                        cpu_to_le32(~TRB_CHAIN);
                                next->link.control |=
  static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
  {
 +#ifndef CONFIG_MTK_XHCI
        int num_trbs_in_deq_seg;
 +#endif
  
        if (ring->num_trbs_free < num_trbs)
                return 0;
  
 +#ifndef CONFIG_MTK_XHCI
        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
                        return 0;
        }
 +#endif
  
        return 1;
  }
@@@ -738,12 -730,10 +738,12 @@@ static void xhci_giveback_urb_in_irq(st
        if (urb_priv->td_cnt == urb_priv->length) {
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 +                      #ifndef CONFIG_MTK_XHCI
                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                        usb_amd_quirk_pll_enable();
                        }
 +                      #endif
                }
                usb_hcd_unlink_urb_from_ep(hcd, urb);
  
@@@ -1679,6 -1669,9 +1679,9 @@@ static void handle_port_status(struct x
                usb_hcd_resume_root_hub(hcd);
        }
  
+       if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+               bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
  
@@@ -1989,13 -1982,11 +1992,13 @@@ td_cleanup
                        ret = 1;
                        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 +                              #ifndef CONFIG_MTK_XHCI
                                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
                                        == 0) {
                                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                                usb_amd_quirk_pll_enable();
                                }
 +                              #endif
                        }
                }
        }
@@@ -2567,7 -2558,7 +2570,7 @@@ static int handle_tx_event(struct xhci_
                                 * successful event after a short transfer.
                                 * Ignore it.
                                 */
 -                              if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
 +                              if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
                                                ep_ring->last_td_was_short) {
                                        ep_ring->last_td_was_short = false;
                                        ret = 0;
@@@ -2936,16 -2927,13 +2939,16 @@@ static int prepare_ring(struct xhci_hc
                        /* If we're not dealing with 0.95 hardware or isoc rings
                         * on AMD 0.96 host, clear the chain bit.
                         */
 +                      #ifndef CONFIG_MTK_XHCI
                        if (!xhci_link_trb_quirk(xhci) &&
                                        !(ring->type == TYPE_ISOC &&
                                         (xhci->quirks & XHCI_AMD_0x96_HOST)))
                                next->link.control &= cpu_to_le32(~TRB_CHAIN);
                        else
                                next->link.control |= cpu_to_le32(TRB_CHAIN);
 -
 +                      #else
 +                      next->link.control &= cpu_to_le32(~TRB_CHAIN);
 +                      #endif
                        wmb();
                        next->link.control ^= cpu_to_le32(TRB_CYCLE);
  
@@@ -3124,29 -3112,6 +3127,29 @@@ int xhci_queue_intr_tx(struct xhci_hcd 
   * right shifted by 10.
   * It must fit in bits 21:17, so it can't be bigger than 31.
   */
 +#ifdef CONFIG_MTK_XHCI
 +static u32 xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total
 +      , unsigned int maxp, unsigned trb_buffer_length)
 +{
 +      u32 max = 31;
 +      int remainder, td_packet_count, packet_transferred;
 +
 +      //0 for the last TRB
 +      //FIXME: need to workaround if there is ZLP in this TD
 +      if (td_running_total + trb_buffer_length == td_transfer_size)
 +              return 0;
 +
 +      //FIXME: need to take care of high-bandwidth (MAX_ESIT)
 +      packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp;
 +      td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
 +      remainder = td_packet_count - packet_transferred;
 +
 +      if (remainder > max)
 +              return max << 17;
 +      else
 +              return remainder << 17;
 +}
 +#else
  static u32 xhci_td_remainder(unsigned int remainder)
  {
        u32 max = (1 << (21 - 17 + 1)) - 1;

        if ((remainder >> 10) >= max)
                return max << 17;
        else
                return (remainder >> 10) << 17;
  }
 +#endif
 +
  
 +#ifndef CONFIG_MTK_XHCI
  /*
   * For xHCI 1.0 host controllers, TD size is the number of max packet sized
   * packets remaining in the TD (*not* including this TRB).
@@@ -3195,7 -3157,6 +3198,7 @@@ static u32 xhci_v1_0_td_remainder(int r
                return 31 << 17;
        return (total_packet_count - packets_transferred) << 17;
  }
 +#endif
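
For context, the TD-size value computed above occupies bits 21:17 of the TRB length word and saturates at 31; the stock pre-1.0 helper encodes remaining bytes shifted right by 10, while the MTK replacement encodes remaining max-packet-sized packets. A worked comparison with assumed numbers (an 8 KB TD on a 512-byte endpoint, 2 KB already queued):

        /* Hypothetical values, for illustration only.
         *
         * Pre-1.0 helper, xhci_td_remainder(remaining bytes):
         *     remaining = 8192 - 2048 = 6144 bytes
         *     field     = (6144 >> 10) << 17 = 6 << 17
         *
         * MTK helper, xhci_td_remainder(td_len, running_total, maxp, trb_len):
         *     td_packet_count    = DIV_ROUND_UP(8192, 512) = 16
         *     packet_transferred = 2048 / 512              = 4
         *     field              = (16 - 4) << 17          = 12 << 17
         *
         * Both paths clamp the value to 31 before shifting, and the MTK
         * helper returns 0 outright for the TRB that completes the TD. */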
  
  static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
                                        (unsigned int) addr + trb_buff_len);
                }
  
 +              /* Set the TRB length, TD size, and interrupter fields. */
 +              #ifdef CONFIG_MTK_XHCI
 +              if(num_trbs >1){
 +                      remainder = xhci_td_remainder(urb->transfer_buffer_length,
 +                              running_total, urb->ep->desc.wMaxPacketSize, trb_buff_len);
 +              }
 +              #else
                /* Set the TRB length, TD size, and interrupter fields. */
                if (xhci->hci_version < 0x100) {
                        remainder = xhci_td_remainder(
                                        trb_buff_len, total_packet_count, urb,
                                        num_trbs - 1);
                }
 +              #endif
 +
                length_field = TRB_LEN(trb_buff_len) |
                        remainder |
                        TRB_INTR_TARGET(0);
@@@ -3374,9 -3326,7 +3377,9 @@@ int xhci_queue_bulk_tx(struct xhci_hcd 
        bool more_trbs_coming;
        int start_cycle;
        u32 field, length_field;
 -
 +#ifdef CONFIG_MTK_XHCI
 +      int max_packet = USB_SPEED_HIGH;
 +#endif
        int running_total, trb_buff_len, ret;
        unsigned int total_packet_count;
        u64 addr;
        }
        /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
  
 +#ifdef CONFIG_MTK_XHCI
 +      switch(urb->dev->speed){
 +              case USB_SPEED_SUPER:
 +                      max_packet = urb->ep->desc.wMaxPacketSize;
 +                      break;
 +              case USB_SPEED_HIGH:
 +              case USB_SPEED_FULL:
 +              case USB_SPEED_LOW:
 +        default:
 +                      max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
 +                      break;
 +      }
 +      if((urb->transfer_flags & URB_ZERO_PACKET)
 +              && ((urb->transfer_buffer_length % max_packet) == 0)){
 +              num_trbs++;
 +      }
 +#endif
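
The extra TRB reserved in the block above is for the terminating zero-length packet: with URB_ZERO_PACKET set and a transfer length that is an exact multiple of max_packet, the data TRBs alone would end the TD on a packet boundary, so one more (zero-length) TRB must be queued. A worked case with assumed numbers:

        /* Assumed: 512-byte high-speed bulk endpoint, 4096-byte URB,
         * URB_ZERO_PACKET set.
         *
         *   4096 % 512 == 0  ->  num_trbs++ ; the final, zero-length TRB is
         *   what actually tells the device the transfer is complete. */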
 +
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);
                /* Only set interrupt on short packet for IN endpoints */
                if (usb_urb_dir_in(urb))
                        field |= TRB_ISP;
 -
 +              #ifdef CONFIG_MTK_XHCI
 +              remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
 +              #else
                /* Set the TRB length, TD size, and interrupter fields. */
                if (xhci->hci_version < 0x100) {
                        remainder = xhci_td_remainder(
                                        trb_buff_len, total_packet_count, urb,
                                        num_trbs - 1);
                }
 +              #endif
                length_field = TRB_LEN(trb_buff_len) |
                        remainder |
                        TRB_INTR_TARGET(0);
@@@ -3583,11 -3512,7 +3586,11 @@@ int xhci_queue_ctrl_tx(struct xhci_hcd 
                field |= 0x1;
  
        /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
 +#ifdef CONFIG_MTK_XHCI
 +      if(1){
 +#else
        if (xhci->hci_version == 0x100) {
 +#endif
                if (urb->transfer_buffer_length > 0) {
                        if (setup->bRequestType & USB_DIR_IN)
                                field |= TRB_TX_TYPE(TRB_DATA_IN);
                field = TRB_TYPE(TRB_DATA);
  
        length_field = TRB_LEN(urb->transfer_buffer_length) |
 +      #ifdef CONFIG_MTK_XHCI
 +              //CC: MTK style, no scatter-gather for control transfer
 +              0 |
 +      #else
                xhci_td_remainder(urb->transfer_buffer_length) |
 +      #endif
                TRB_INTR_TARGET(0);
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
@@@ -3739,9 -3659,6 +3742,9 @@@ static int xhci_queue_isoc_tx(struct xh
        u64 start_addr, addr;
        int i, j;
        bool more_trbs_coming;
 +#ifdef CONFIG_MTK_XHCI
 +      int max_packet = USB_SPEED_HIGH;
 +#endif
  
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
  
        start_trb = &ep_ring->enqueue->generic;
        start_cycle = ep_ring->cycle_state;
  
 +#ifdef CONFIG_MTK_XHCI
 +      switch(urb->dev->speed){
 +              case USB_SPEED_SUPER:
 +                      max_packet = urb->ep->desc.wMaxPacketSize;
 +                      break;
 +              case USB_SPEED_HIGH:
 +              case USB_SPEED_FULL:
 +              case USB_SPEED_LOW:
 +        default:
 +                      max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
 +                      break;
 +      }
 +#endif
        urb_priv = urb->hcpriv;
        /* Queue the first TRB, even if it's zero-length */
        for (i = 0; i < num_tds; i++) {
                                trb_buff_len = td_remain_len;
  
                        /* Set the TRB length, TD size, & interrupter fields. */
 +                      #ifdef CONFIG_MTK_XHCI
 +                      remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
 +                      #else
                        if (xhci->hci_version < 0x100) {
                                remainder = xhci_td_remainder(
                                                td_len - running_total);
                                                total_packet_count, urb,
                                                (trbs_per_td - j - 1));
                        }
 +                      #endif
                        length_field = TRB_LEN(trb_buff_len) |
                                remainder |
                                TRB_INTR_TARGET(0);
                        goto cleanup;
                }
        }
 -
 +      #ifndef CONFIG_MTK_XHCI
        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                        usb_amd_quirk_pll_disable();
        }
 +      #endif
        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
  
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
diff --combined drivers/usb/host/xhci.c
index c33a1f80c1c0a0d292bc152311dc1970610c30b7,1f901fc25590af7e0eddfcea18a739c05417564a..114004a87ae02f8615bd7471e5e08449865c9d9a
  
  #include "xhci.h"
  
 +#ifdef CONFIG_MTK_XHCI
 +#include <asm/uaccess.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/platform_device.h>
 +#include <linux/xhci/xhci-mtk-scheduler.h>
 +#include <linux/xhci/xhci-mtk-power.h>
 +#include <linux/xhci/xhci-mtk.h>
 +
 +#ifdef CONFIG_USBIF_COMPLIANCE
 +#include <linux/proc_fs.h>
 +#include <asm/uaccess.h>
 +#include <linux/seq_file.h>
 +#include <linux/kobject.h>
 +#include <linux/miscdevice.h>
 +
 +static struct miscdevice mu3h_uevent_device = {
 +         .minor = MISC_DYNAMIC_MINOR,
 +         .name = "usbif_u3h_uevent",
 +         .fops = NULL,
 +};
 +#endif
 +#endif
 +
  #define DRIVER_AUTHOR "Sarah Sharp"
  #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
  
@@@ -61,23 -38,6 +61,23 @@@ static int link_quirk
  module_param(link_quirk, int, S_IRUGO | S_IWUSR);
  MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
  
 +#ifdef CONFIG_USBIF_COMPLIANCE
 +int usbif_u3h_send_event(char* event)
 +{
 +      char udev_event[128];
 +      char *envp[] = {udev_event, NULL };
 +      int ret ;
 +
 +      snprintf(udev_event, 128, "USBIF_EVENT=%s",event);
 +      printk("usbif_u3h_send_event - sending event - %s in %s\n", udev_event, kobject_get_path(&mu3h_uevent_device.this_device->kobj, GFP_KERNEL));
 +      ret = kobject_uevent_env(&mu3h_uevent_device.this_device->kobj, KOBJ_CHANGE, envp);
 +      if (ret < 0)
 +              printk("usbif_u3h_send_event: sending failed, ret = %d\n", ret);
 +
 +      return ret;
 +}
 +#endif
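
usbif_u3h_send_event() above is a thin wrapper around kobject_uevent_env(): it formats one USBIF_EVENT=<string> variable and raises a KOBJ_CHANGE uevent on the usbif_u3h_uevent misc device so a userspace compliance harness can react. A hypothetical call site; the event string is invented, not from the tree:

        #ifdef CONFIG_USBIF_COMPLIANCE
                /* "DEV_OVER_CURRENT" is an invented example event name. */
                usbif_u3h_send_event("DEV_OVER_CURRENT");
        #endif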
 +
  /* TODO: copied from ehci-hcd.c - can this be refactored? */
  /*
   * xhci_handshake - spin reading hc until handshake completes or fails
@@@ -180,7 -140,6 +180,7 @@@ static int xhci_start(struct xhci_hcd *
                                XHCI_MAX_HALT_USEC);
        if (!ret)
                xhci->xhc_state &= ~XHCI_STATE_HALTED;
 +
        return ret;
  }
  
@@@ -562,7 -521,6 +562,7 @@@ int xhci_init(struct usb_hcd *hcd
        } else {
                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
        }
 +
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");
  
@@@ -635,7 -593,6 +635,7 @@@ static int xhci_run_finished(struct xhc
                xhci_halt(xhci);
                return -ENODEV;
        }
 +
        xhci->shared_hcd->state = HC_STATE_RUNNING;
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
  
                xhci_ring_cmd_db(xhci);
  
        xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
 +
        return 0;
  }
  
@@@ -790,10 -746,10 +790,10 @@@ void xhci_stop(struct usb_hcd *hcd
                xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
                                __func__);
        }
 -
 +#ifndef CONFIG_MTK_XHCI
        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();
 -
 +#endif
        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@@ -1634,11 -1590,6 +1634,11 @@@ int xhci_drop_endpoint(struct usb_hcd *
        u32 drop_flag;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        int ret;
 +#ifdef CONFIG_MTK_XHCI
 +      struct sch_ep *sch_ep = NULL;
 +      int isTT;
 +      int ep_type = 0;
 +#endif
  
        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0)
  
        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
  
 +#ifdef CONFIG_MTK_XHCI
 +      slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[udev->slot_id]->out_ctx);
 +      if((slot_ctx->tt_info & 0xff) > 0){
 +              isTT = 1;
 +      }
 +      else{
 +              isTT = 0;
 +      }
 +      if(usb_endpoint_xfer_int(&ep->desc)){
 +              ep_type = USB_EP_INT;
 +      }
 +      else if(usb_endpoint_xfer_isoc(&ep->desc)){
 +              ep_type = USB_EP_ISOC;
 +      }
 +      else if(usb_endpoint_xfer_bulk(&ep->desc)){
 +              ep_type = USB_EP_BULK;
 +      }
 +      sch_ep = mtk_xhci_scheduler_remove_ep(udev->speed, usb_endpoint_dir_in(&ep->desc)
 +              , isTT, ep_type, (mtk_u32 *)ep);
 +      if(sch_ep != NULL){
 +              kfree(sch_ep);
 +      }
 +      else{
 +              xhci_warn(xhci, "[MTK] Could not find sch_ep instance when removing endpoint\n");
 +      }
 +#endif
 +
        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
 +
 +      #if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
 +      mtk_ep_count_dec();
 +      #endif
 +
        return 0;
  }
  
@@@ -1757,16 -1676,6 +1757,16 @@@ int xhci_add_endpoint(struct usb_hcd *h
        u32 new_add_flags, new_drop_flags, new_slot_info;
        struct xhci_virt_device *virt_dev;
        int ret = 0;
 +#ifdef CONFIG_MTK_XHCI
 +      struct xhci_ep_ctx *in_ep_ctx;
 +      struct sch_ep *sch_ep;
 +      int isTT;
 +      int ep_type = 0;
 +      int maxp = 0;
 +      int burst = 0;
 +      int mult = 0;
 +      int interval = 0;
 +#endif
  
        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0) {
                return -ENOMEM;
        }
  
 +#ifdef CONFIG_MTK_XHCI
 +      in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
 +      slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
 +
 +      if((slot_ctx->tt_info & 0xff) > 0){
 +              isTT = 1;
 +      }
 +      else{
 +              isTT = 0;
 +      }
 +      if(usb_endpoint_xfer_int(&ep->desc)){
 +              ep_type = USB_EP_INT;
 +      }
 +      else if(usb_endpoint_xfer_isoc(&ep->desc)){
 +              ep_type = USB_EP_ISOC;
 +      }
 +      else if(usb_endpoint_xfer_bulk(&ep->desc)){
 +              ep_type = USB_EP_BULK;
 +      }
 +      if(udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH
 +              || udev->speed == USB_SPEED_LOW){
 +              maxp = ep->desc.wMaxPacketSize & 0x7FF;
 +              burst = ep->desc.wMaxPacketSize >> 11;
 +              mult = 0;
 +      }
 +      else if(udev->speed == USB_SPEED_SUPER){
 +              maxp = ep->desc.wMaxPacketSize & 0x7FF;
 +              burst = ep->ss_ep_comp.bMaxBurst;
 +              mult = ep->ss_ep_comp.bmAttributes & 0x3;
 +      }
 +      interval = (1 << ((in_ep_ctx->ep_info >> 16) & 0xff));
 +      sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL);
 +      if(mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc),
 +              isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep
 +              , (mtk_u32 *)in_ep_ctx, sch_ep) != SCH_SUCCESS){
 +              xhci_err(xhci, "[MTK] not enough bandwidth\n");
 +              return -ENOSPC;
 +      }
 +#endif
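
The parameters handed to mtk_xhci_scheduler_add_ep() come straight out of the descriptors and the input endpoint context: bits 10:0 of wMaxPacketSize give maxp, bits 12:11 (non-SuperSpeed) or bMaxBurst (SuperSpeed) give burst, and the Interval field stored in bits 23:16 of ep_info is expanded back to 2^Interval. A worked decode on assumed values, mirroring the code above:

        /* Assumed descriptor values, for illustration only. */
        u16 w = 0x1400;                 /* hypothetical high-bandwidth HS wMaxPacketSize */
        int maxp  = w & 0x7ff;          /* -> 1024 bytes per packet                      */
        int burst = w >> 11;            /* -> 2 additional transactions per microframe   */
        int interval = 1 << 4;          /* ep_info bits 23:16 == 4 -> 16 microframes,
                                         * i.e. 2 ms at high speed                       */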
 +
        ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
  
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
 +
 +      #if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
 +      mtk_ep_count_inc();
 +      #endif
 +
        return 0;
  }
  
@@@ -3492,6 -3356,9 +3492,9 @@@ int xhci_discover_or_reset_device(struc
                        return -EINVAL;
        }
  
+       if (virt_dev->tt_info)
+               old_active_eps = virt_dev->tt_info->active_eps;
        if (virt_dev->udev != udev) {
                /* If the virt_dev and the udev does not match, this virt_dev
                 * may belong to another udev.
@@@ -3639,9 -3506,7 +3642,9 @@@ void xhci_free_dev(struct usb_hcd *hcd
  {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_virt_device *virt_dev;
 +#ifndef CONFIG_USB_DEFAULT_PERSIST
        struct device *dev = hcd->self.controller;
 +#endif
        unsigned long flags;
        u32 state;
        int i, ret;
@@@ -4846,12 -4711,6 +4849,12 @@@ int xhci_gen_setup(struct usb_hcd *hcd
                return 0;
        }
  
 +#ifdef CONFIG_MTK_XHCI
 +      retval = mtk_xhci_ip_init(hcd, xhci);
 +      if(retval)
 +              goto error;
 +#endif
 +
        xhci->cap_regs = hcd->regs;
        xhci->op_regs = hcd->regs +
                HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
        if (retval)
                goto error;
        xhci_dbg(xhci, "Called HCD init\n");
 +
 +    printk("%s(%d): do mtk_xhci_set\n", __func__, __LINE__);
 +
        return 0;
  error:
        kfree(xhci);
@@@ -4914,189 -4770,6 +4917,189 @@@ MODULE_DESCRIPTION(DRIVER_DESC)
  MODULE_AUTHOR(DRIVER_AUTHOR);
  MODULE_LICENSE("GPL");
  
 +#ifdef CONFIG_USBIF_COMPLIANCE
 +#ifndef CONFIG_USB_MTK_DUALMODE
 +static int xhci_hcd_driver_init(void)
 +{
 +      int retval;
 +
 +      retval = xhci_register_pci();
 +      if (retval < 0) {
 +              printk(KERN_DEBUG "Problem registering PCI driver.");
 +              return retval;
 +      }
 +
 +    #ifdef CONFIG_MTK_XHCI
 +    mtk_xhci_ip_init();
 +    #endif
 +
 +      retval = xhci_register_plat();
 +      if (retval < 0) {
 +              printk(KERN_DEBUG "Problem registering platform driver.");
 +              goto unreg_pci;
 +      }
 +
 +    #ifdef CONFIG_MTK_XHCI
 +    retval = xhci_attrs_init();
 +    if(retval < 0){
 +        printk(KERN_DEBUG "Problem creating xhci attributes.");
 +        goto unreg_plat;
 +    }
 +
 +    mtk_xhci_wakelock_init();
 +    #endif
 +
 +      /*
 +       * Check the compiler generated sizes of structures that must be laid
 +       * out in specific ways for hardware access.
 +       */
 +      BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
 +      /* xhci_device_control has eight fields, and also
 +       * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
 +       */
 +      BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
 +      BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
 +      /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
 +      BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
 +      return 0;
 +
 +#ifdef CONFIG_MTK_XHCI
 +unreg_plat:
 +    xhci_unregister_plat();
 +#endif
 +unreg_pci:
 +      xhci_unregister_pci();
 +      return retval;
 +}
 +
 +static void xhci_hcd_driver_cleanup(void)
 +{
 +      xhci_unregister_pci();
 +      xhci_unregister_plat();
 +    xhci_attrs_exit();
 +}
 +#else
 +static int xhci_hcd_driver_init(void)
 +{
 +      // init in mt_devs.c
 +      mtk_xhci_eint_iddig_init();
 +      mtk_xhci_switch_init();
 +      //mtk_xhci_wakelock_init();
 +      return 0;
 +}
 +
 +static void xhci_hcd_driver_cleanup(void)
 +{
 +      mtk_xhci_eint_iddig_deinit() ;
 +}
 +
 +#endif
 +
 +static int mu3h_normal_driver_on = 0 ;
 +
 +static int xhci_mu3h_proc_show(struct seq_file *seq, void *v)
 +{
 +        seq_printf(seq, "xhci_mu3h_proc_show, mu3h is %d (on:1, off:0)\n", mu3h_normal_driver_on);
 +        return 0;
 +}
 +
 +static int xhci_mu3h_proc_open(struct inode *inode, struct file *file)
 +{
 +    return single_open(file, xhci_mu3h_proc_show, inode->i_private);
 +}
 +
 +static ssize_t xhci_mu3h_proc_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos)
 +{
 +      int ret ;
 +      char msg[32] ;
 +      int result;
 +
 +      if (length >= sizeof(msg)) {
 +              printk("xhci_mu3h_proc_write length error, len = %d\n", (unsigned int)length);
 +              return -EINVAL;
 +      }
 +      if (copy_from_user(msg, buf, length))
 +              return -EFAULT;
 +
 +      msg[length] = 0 ;
 +
 +      printk("xhci_mu3h_proc_write: %s, current driver on/off: %d\n", msg, mu3h_normal_driver_on);
 +
 +      if ((msg[0] == '1') && (mu3h_normal_driver_on == 0)){
 +              xhci_hcd_driver_init() ;
 +              mu3h_normal_driver_on = 1 ;
 +              printk("registered mu3h xhci driver\n");
 +      }else if ((msg[0] == '0') && (mu3h_normal_driver_on == 1)){
 +              xhci_hcd_driver_cleanup();
 +              mu3h_normal_driver_on = 0 ;
 +              printk("unregistered mu3h xhci driver\n");
 +      }else{
 +              printk("xhci_mu3h_proc_write: invalid request, ignored\n");
 +      }
 +      return length;
 +}
 +
 +static const struct file_operations mu3h_proc_fops = {
 +      .owner = THIS_MODULE,
 +      .open = xhci_mu3h_proc_open,
 +      .write = xhci_mu3h_proc_write,
 +      .read = seq_read,
 +      .llseek = seq_lseek,
 +
 +};
 +
 +static int __init xhci_hcd_init(void)
 +{
 +      struct proc_dir_entry *prEntry;
 +
 +      printk(KERN_DEBUG "xhci_hcd_init");
 +
 +      // set xhci up at boot up
 +      xhci_hcd_driver_init() ;
 +      mtk_xhci_wakelock_init();
 +      mu3h_normal_driver_on = 1;
 +
 +      // USBIF
 +      prEntry = proc_create("mu3h_driver_init", 0666, NULL, &mu3h_proc_fops);
 +      if (prEntry)
 +      {
 +              printk("create the mu3h init proc OK!\n") ;
 +      }else{
 +              printk("[ERROR] create the mu3h init proc FAIL\n") ;
 +      }
 +
 +#ifdef CONFIG_MTK_XHCI
 +
 +      if (!misc_register(&mu3h_uevent_device)){
 +              printk("create the mu3h_uevent_device uevent device OK!\n") ;
 +
 +      }else{
 +              printk("[ERROR] create the mu3h_uevent_device uevent device fail\n") ;
 +      }
 +
 +#endif
 +
 +      return 0 ;
 +
 +}
 +module_init(xhci_hcd_init);
 +
 +static void __exit xhci_hcd_cleanup(void)
 +{
 +#ifdef CONFIG_MTK_XHCI
 +      misc_deregister(&mu3h_uevent_device);
 +#endif
 +      printk(KERN_DEBUG "xhci_hcd_cleanup");
 +}
 +module_exit(xhci_hcd_cleanup);
 +
 +#else
 +#ifndef CONFIG_USB_MTK_DUALMODE
  static int __init xhci_hcd_init(void)
  {
        int retval;
                printk(KERN_DEBUG "Problem registering platform driver.");
                goto unreg_pci;
        }
 +
 +    #ifdef CONFIG_MTK_XHCI
 +    retval = xhci_attrs_init();
 +    if(retval < 0){
 +        printk(KERN_DEBUG "Problem creating xhci attributes.");
 +        goto unreg_plat;
 +    }
 +
 +    mtk_xhci_wakelock_init();
 +    #endif
 +
        /*
         * Check the compiler generated sizes of structures that must be laid
         * out in specific ways for hardware access.
        /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
        BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
        return 0;
 +
 +#ifdef CONFIG_MTK_XHCI
 +unreg_plat:
 +    xhci_unregister_plat();
 +#endif
  unreg_pci:
        xhci_unregister_pci();
        return retval;
@@@ -5155,23 -4812,5 +5158,23 @@@ static void __exit xhci_hcd_cleanup(voi
  {
        xhci_unregister_pci();
        xhci_unregister_plat();
 +    xhci_attrs_exit();
  }
  module_exit(xhci_hcd_cleanup);
 +#else
 +static int __init xhci_hcd_init(void)
 +{
 +    mtk_xhci_eint_iddig_init();
 +    mtk_xhci_switch_init();
 +    mtk_xhci_wakelock_init();
 +      return 0;
 +}
 +module_init(xhci_hcd_init);
 +
 +static void __exit xhci_hcd_cleanup(void)
 +{
 +}
 +module_exit(xhci_hcd_cleanup);
 +
 +#endif
 +#endif
diff --combined drivers/usb/host/xhci.h
index 4c820b09610acad13872a1c8888879ce909efb3f,deb2537ae75c4c76afb5b4aec3c4f5e67e52bdc1..6d5839213405f87bacfe36d75a6e67da5bb68339
@@@ -33,7 -33,6 +33,7 @@@
  #include      "xhci-ext-caps.h"
  #include "pci-quirks.h"
  
 +
  /* xHCI PCI Configuration Registers */
  #define XHCI_SBRN_OFFSET      (0x60)
  
@@@ -281,6 -280,7 +281,7 @@@ struct xhci_op_regs 
  #define XDEV_U0               (0x0 << 5)
  #define XDEV_U2               (0x2 << 5)
  #define XDEV_U3               (0x3 << 5)
+ #define XDEV_INACTIVE (0x6 << 5)
  #define XDEV_RESUME   (0xf << 5)
  /* true: port has power (see HCC_PPC) */
  #define PORT_POWER    (1 << 9)
@@@ -680,14 -680,6 +681,14 @@@ struct xhci_ep_ctx 
  /* deq bitmasks */
  #define EP_CTX_CYCLE_MASK             (1 << 0)
  
 +#ifdef CONFIG_MTK_XHCI
 +/* mtk scheduler bitmasks */
 +#define BPKTS(p)      ((p) & 0x3f)
 +#define BCSCOUNT(p)   (((p) & 0x7) << 8)
 +#define BBM(p)                ((p) << 11)
 +#define BOFFSET(p)    ((p) & 0x3fff)
 +#define BREPEAT(p)    (((p) & 0x7fff) << 16)
 +#endif
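
These helpers are plain bit-field packers for the MediaTek scheduler words: packets per service interval in bits 5:0, a count in bits 10:8, a flag at bit 11, an offset in bits 13:0 and a repeat value in bits 30:16. How the driver actually combines them is not visible in this hunk; the grouping below is only a guess at typical usage:

        /* Illustrative only -- which endpoint-context dwords these values
         * land in is an assumption, not shown in this diff. */
        u32 pkt_word = BPKTS(3) | BCSCOUNT(2) | BBM(1);
        u32 win_word = BOFFSET(0) | BREPEAT(0);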
  
  /**
   * struct xhci_input_control_context
@@@ -1421,12 -1413,6 +1422,12 @@@ struct xhci_hcd 
        /* Our HCD's current interrupter register set */
        struct  xhci_intr_reg __iomem *ir_set;
  
 +      #ifdef CONFIG_MTK_XHCI
 +      unsigned long base_regs;
 +      unsigned long sif_regs;
 +      unsigned long sif2_regs;
 +      #endif
 +
        /* Cached register copies of read-only HC data */
        __u32           hcs_params1;
        __u32           hcs_params2;
  #define       XHCI_LINK_TRB_QUIRK     (1 << 0)
  #define XHCI_RESET_EP_QUIRK   (1 << 1)
  #define XHCI_NEC_HOST         (1 << 2)
 +#ifndef CONFIG_MTK_XHCI
  #define XHCI_AMD_PLL_FIX      (1 << 3)
 +#endif
  #define XHCI_SPURIOUS_SUCCESS (1 << 4)
  /*
   * Certain Intel host controllers have a limit to the number of endpoint
  #define XHCI_BROKEN_MSI               (1 << 6)
  #define XHCI_RESET_ON_RESUME  (1 << 7)
  #define       XHCI_SW_BW_CHECKING     (1 << 8)
 +#ifndef CONFIG_MTK_XHCI
  #define XHCI_AMD_0x96_HOST    (1 << 9)
 +#endif
  #define XHCI_TRUST_TX_LENGTH  (1 << 10)
  #define XHCI_LPM_SUPPORT      (1 << 11)
  #define XHCI_INTEL_HOST               (1 << 12)
@@@ -1594,12 -1576,12 +1595,12 @@@ static inline struct usb_hcd *xhci_to_h
  /* TODO: copied from ehci.h - can be refactored? */
  /* xHCI spec says all registers are little endian */
  static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
 -              __le32 __iomem *regs)
 +              void __iomem *regs)
  {
        return readl(regs);
  }
  static inline void xhci_writel(struct xhci_hcd *xhci,
 -              const unsigned int val, __le32 __iomem *regs)
 +              const unsigned int val, void __iomem *regs)
  {
        writel(val, regs);
  }
diff --combined drivers/usb/storage/unusual_devs.h
index d411f173f2ec1d00b4567751f7fb39296010657b,da380a99c6b81855a076327d66ed289e12682c7e..2f259351d1e42df154006e6af3c19b50acaf556c
@@@ -1938,24 -1938,7 +1938,24 @@@ UNUSUAL_DEV(  0x12d1, 0x143F, 0x0000, 0
                "Mass Storage",
                USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
                0),
 +#if 1         
 +HW_UNUSUAL_DEV ( 0x12d1, 0x08, 0x06, 0x50,
 +              "HUAWEI",
 +              "HUAWEI MOBILE Mass Storage",
 +              USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
 +              0),
  
 +HW_UNUSUAL_DEV ( 0x19d2, 0x08, 0x06, 0x50,
 +              "ZTE",
 +              "ZTE MOBILE Mass Storage",
 +              USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_zte_init,
 +              0),
 +HW_UNUSUAL_DEV(  0x2001, 0xa708, 0x06, 0x50,
 +              "D-LINK",
 +              "CD-ROM",
 +              USB_SC_8020, USB_PR_DEVICE, usb_stor_dlink_scsi_init,
 +              0),
 +#endif
  /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
  UNUSUAL_DEV(  0x132b, 0x000b, 0x0001, 0x0001,
                "Minolta",
@@@ -2049,6 -2032,18 +2049,18 @@@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_READ_DISC_INFO ),
  
+ /* Reported by Oliver Neukum <oneukum@suse.com>
+  * This device morphes spontaneously into another device if the access
+  * pattern of Windows isn't followed. Thus writable media would be dirty
+  * if the initial instance is used. So the device is limited to its
+  * virtual CD.
+  * And yes, the concept that BCD goes up to 9 is not heeded */
+ UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
+               "ZTE,Incorporated",
+               "ZTE WCDMA Technologies MSM",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SINGLE_LUN ),
  /* Reported by Sven Geggus <sven-usbst@geggus.net>
   * This encrypted pen drive returns bogus data for the initial READ(10).
   */
diff --combined mm/memory.c
index 9feed4bfb32366d3356ba6dcc6390c748655ca75,30bf9cce8c2b046922a79a19b4a4ac046bb82f81..313d94a9ae260fcdf1d4da7de288b055171afd7d
  
  #include "internal.h"
  
 +#ifdef CONFIG_MTK_EXTMEM
 +extern bool extmem_in_mspace(struct vm_area_struct *vma);
 +extern unsigned long get_virt_from_mspace(unsigned long pa);
 +#endif
 +
  #ifdef LAST_NID_NOT_IN_PAGE_FLAGS
  #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
  #endif
@@@ -1467,16 -1462,6 +1467,16 @@@ int zap_vma_ptes(struct vm_area_struct 
  }
  EXPORT_SYMBOL_GPL(zap_vma_ptes);
  
 +/*
 + * FOLL_FORCE can write to even unwritable pte's, but only
 + * after we've gone through a COW cycle and they are dirty.
 + */
 +static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 +{
 +      return pte_write(pte) ||
 +              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
 +}
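
The helper above relaxes the write check for forced GUP only after the page has gone through a copy-on-write fault and is marked dirty; the hunk further down swaps "foll_flags &= ~FOLL_WRITE" for "foll_flags |= FOLL_COW" so the retry passes this test instead of silently dropping the write request. FOLL_FORCE itself comes from the force argument of the 3.10-era get_user_pages(), as in the existing call in __access_remote_vm() below:

        /* write = 1, force = 1  ->  FOLL_WRITE | FOLL_FORCE internally */
        ret = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma);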
 +
  /**
   * follow_page_mask - look up a page descriptor from a user-virtual address
   * @vma: vm_area_struct mapping @address
@@@ -1584,7 -1569,7 +1584,7 @@@ split_fallthrough
        }
        if ((flags & FOLL_NUMA) && pte_numa(pte))
                goto no_page;
 -      if ((flags & FOLL_WRITE) && !pte_write(pte))
 +      if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
                goto unlock;
  
        page = vm_normal_page(vma, address, pte);
@@@ -1658,7 -1643,6 +1658,7 @@@ no_page_table
                return ERR_PTR(-EFAULT);
        return page;
  }
 +EXPORT_SYMBOL_GPL(follow_page_mask);
  
  static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
  {
@@@ -1803,24 -1787,11 +1803,24 @@@ long __get_user_pages(struct task_struc
                        page_mask = 0;
                        goto next_page;
                }
 -
 +    #ifdef CONFIG_MTK_EXTMEM
 +        if (!vma || !(vm_flags & vma->vm_flags))
 +              {
 +                  return i ? : -EFAULT;
 +        }
 +
 +              if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 +              {
 +                  /* Allow VM_IO | VM_RESERVED | VM_PFNMAP (reserved physical-memory PFN mappings). */
 +                  if(!((vma->vm_flags&VM_IO)&&(vma->vm_flags&VM_RESERVED)&&(vma->vm_flags&VM_PFNMAP)))
 +                          return i ? : -EFAULT;
 +        }
 +    #else
                if (!vma ||
                    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
                        return i ? : -EFAULT;
 +    #endif
  
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                    !(vma->vm_flags & VM_WRITE))
 -                                      foll_flags &= ~FOLL_WRITE;
 +                                      foll_flags |= FOLL_COW;
  
                                cond_resched();
                        }
@@@ -2397,18 -2368,12 +2397,18 @@@ int remap_pfn_range(struct vm_area_stru
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         * See vm_normal_page() for details.
         */
 +#ifdef CONFIG_MTK_EXTMEM
 +      if (addr == vma->vm_start && end == vma->vm_end) {
 +              vma->vm_pgoff = pfn;
 +      } else if (is_cow_mapping(vma->vm_flags))
 +              return -EINVAL;
 +#else
        if (is_cow_mapping(vma->vm_flags)) {
                if (addr != vma->vm_start || end != vma->vm_end)
                        return -EINVAL;
                vma->vm_pgoff = pfn;
        }
 -
 +#endif
        err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
        if (err)
                return -EINVAL;
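
The MTK_EXTMEM variant above records the base pfn in vm_pgoff for any remap that spans the whole VMA and, like the stock code, rejects partial remaps of copy-on-write mappings. The full-VMA case is the usual driver mmap pattern; a minimal sketch of such a caller, where mydrv_mmap and the phys_base value are invented for illustration:

        static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
        {
                unsigned long phys_base = 0x80000000UL; /* assumed reserved region */
                unsigned long size = vma->vm_end - vma->vm_start;

                /* Maps the whole VMA, so the branch above records the pfn in
                 * vma->vm_pgoff instead of returning -EINVAL. */
                return remap_pfn_range(vma, vma->vm_start, phys_base >> PAGE_SHIFT,
                                       size, vma->vm_page_prot);
        }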
@@@ -3533,11 -3498,10 +3533,14 @@@ static int do_linear_fault(struct mm_st
        pgoff_t pgoff = (((address & PAGE_MASK)
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  
 +      /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
 +      if (!vma->vm_ops->fault)
 +              return VM_FAULT_SIGBUS;
 +
        pte_unmap(page_table);
+       /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+       if (!vma->vm_ops->fault)
+               return VM_FAULT_SIGBUS;
        return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
  }
  
@@@ -3749,10 -3713,9 +3752,9 @@@ int handle_pte_fault(struct mm_struct *
        entry = *pte;
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
-                       if (vma->vm_ops) {
+                       if (vma->vm_ops)
                                return do_linear_fault(mm, vma, address,
-                                       pte, pmd, flags, entry);
-                       }
+                                               pte, pmd, flags, entry);
                        return do_anonymous_page(mm, vma, address,
                                                 pte, pmd, flags);
                }
@@@ -4163,21 -4126,6 +4165,21 @@@ static int __access_remote_vm(struct ta
                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
                if (ret <= 0) {
 +#ifdef CONFIG_MTK_EXTMEM
 +                      if (!write) {
 +                              vma = find_vma(mm, addr);
 +                              if (!vma || vma->vm_start > addr)
 +                                      break;
 +                              if (vma->vm_end < addr + len)
 +                                      len = vma->vm_end - addr;
 +                              if (extmem_in_mspace(vma)) {
 +                                      void *extmem_va = (void *)get_virt_from_mspace(vma->vm_pgoff << PAGE_SHIFT) + (addr - vma->vm_start);
 +                                      memcpy(buf, extmem_va, len);
 +                                      buf += len;
 +                                      break;
 +                              }
 +                      }
 +#endif
                        /*
                         * Check if this is a VM_IO | VM_PFNMAP VMA, which
                         * we can access using slightly different code.