Merge tag 'v3.10.107' into update
author Stricted <info@stricted.net>
Wed, 21 Mar 2018 22:07:35 +0000 (23:07 +0100)
committer Stricted <info@stricted.net>
Wed, 21 Mar 2018 22:07:35 +0000 (23:07 +0100)
This is the 3.10.107 stable release

50 files changed:
Documentation/kernel-parameters.txt
Makefile
arch/arm/include/asm/cputype.h
arch/arm/kernel/hw_breakpoint.c
arch/x86/kernel/ftrace.c
block/genhd.c
crypto/Makefile
drivers/char/Kconfig
drivers/char/mem.c
drivers/cpufreq/cpufreq.c
drivers/mtd/ubi/upd.c
drivers/net/tun.c
drivers/rtc/interface.c
drivers/scsi/sd.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/f_acm.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/fat/inode.c
fs/nfs/nfs4proc.c
fs/proc/task_mmu.c
include/linux/mm.h
ipc/shm.c
kernel/futex.c
kernel/printk.c
kernel/sched/core.c
kernel/sysctl.c
mm/filemap.c
mm/memory.c
mm/mempolicy.c
mm/mmap.c
net/ipv4/fib_frontend.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv6/addrconf.c
net/ipv6/raw.c
net/ipv6/route.c
net/l2tp/l2tp_ip.c
net/unix/af_unix.c
security/apparmor/domain.c

diff --combined Documentation/kernel-parameters.txt
index 61dc1796f0ab257e1aca99525c2fce879c81dcbf,ed0c7e3ba8da189b4b2677f9fbe28a122323c18d..6a0b69891c28bc8677c4622359bdfe9a3ae46530
@@@ -2889,6 -2889,13 +2889,13 @@@ bytes respectively. Such letter suffixe
        spia_pedr=
        spia_peddr=
  
+       stack_guard_gap=        [MM]
+                       Override the default stack gap protection. The value
+                       is in page units: it sets how many pages before (for
+                       stacks growing down) or after (for stacks growing up)
+                       the main stack are reserved for no other mapping.
+                       The default is 256 pages.
        stacktrace      [FTRACE]
                        Enable the stack tracer on boot up.
  
                                        HIGHMEM regardless of setting
                                        of CONFIG_HIGHPTE.
  
 +      uuid_debug=     (Boolean) whether to enable debugging of TuxOnIce's
 +                      uuid support.
 +
        vdso=           [X86,SH]
                        vdso=2: enable compat VDSO (default with COMPAT_VDSO)
                        vdso=1: enable VDSO (default)
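For example, with 4 KiB pages the default of 256 pages keeps a 1 MiB gap unmapped below a downward-growing stack, and booting with stack_guard_gap=1 restores the historical single-page guard.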
diff --combined Makefile
index a4bdeeb487d4973d9fbbed43a0b1b082d0512be7,752b1c67daa0645921a2b71f12be0ad014a19fd8..74550001b9b4fab904093418373bc10f45bda8e1
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 106
+ SUBLEVEL = 107
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -374,7 -374,7 +374,7 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
                   -fno-delete-null-pointer-checks \
 -                 -std=gnu89
 +                 -w -std=gnu89
  
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
diff --combined arch/arm/include/asm/cputype.h
index 9347a1c69e372c55b3b8d3294e6a515e1fbdfe4a,f389107947e055a1be0c8e002700f3c616bf4289..cc33de61bc7f6122681f2de325803313efbaca67
@@@ -9,7 -9,6 +9,7 @@@
  #define CPUID_TCM     2
  #define CPUID_TLBTYPE 3
  #define CPUID_MPIDR   5
 +#define CPUID_REVIDR  6
  
  #define CPUID_EXT_PFR0        "c1, 0"
  #define CPUID_EXT_PFR1        "c1, 1"
  #define ARM_CPU_PART_CORTEX_A5                0xC050
  #define ARM_CPU_PART_CORTEX_A15               0xC0F0
  #define ARM_CPU_PART_CORTEX_A7                0xC070
 +#define ARM_CPU_PART_CORTEX_A12               0xC0D0
 +#define ARM_CPU_PART_CORTEX_A17               0xC0E0
 +#define ARM_CPU_PART_CORTEX_A53               0xD030
  
  #define ARM_CPU_XSCALE_ARCH_MASK      0xe000
  #define ARM_CPU_XSCALE_ARCH_V1                0x2000
  #define ARM_CPU_XSCALE_ARCH_V2                0x4000
  #define ARM_CPU_XSCALE_ARCH_V3                0x6000
  
+ /* Qualcomm implemented cores */
+ #define ARM_CPU_PART_SCORPION         0x510002d0
  extern unsigned int processor_id;
  
  #ifdef CONFIG_CPU_CP15
diff --combined arch/arm/kernel/hw_breakpoint.c
index 1b803117ed91c212a6f4077d6c87b58c0e219f44,b0b69e9ce66083826877bef9060dc33be757ceac..e0c98b47eb9cb70843d934b1e0e1261a2203e3ea
@@@ -1049,8 -1049,7 +1049,8 @@@ static struct notifier_block dbg_cpu_pm
  
  static void __init pm_init(void)
  {
 -      cpu_pm_register_notifier(&dbg_cpu_pm_nb);
 +      if (has_ossr)
 +              cpu_pm_register_notifier(&dbg_cpu_pm_nb);
  }
  #else
  static inline void pm_init(void)
@@@ -1067,6 -1066,22 +1067,22 @@@ static int __init arch_hw_breakpoint_in
                return 0;
        }
  
+       /*
+        * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+        * whenever a WFI is issued, even if the core is not powered down, in
+        * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
+        * breakpoint and watchpoint registers are treated as undefined, so
+        * this results in boot time and runtime failures when these are
+        * accessed and we unexpectedly take a trap.
+        *
+        * It's not clear if/how this can be worked around, so we blacklist
+        * Scorpion CPUs to avoid these issues.
+       */
+       if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) {
+               pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+               return 0;
+       }
        has_ossr = core_has_os_save_restore();
  
        /* Determine how many BRPs/WRPs are available. */
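As a standalone sketch (assuming the usual MIDR field layout, not taken from the patch itself), the 0xff00fff0 mask compares only the implementer and part-number fields, so every Scorpion variant and revision matches the single blacklist value:

static inline bool midr_is_scorpion(u32 midr)
{
	/* bits [31:24] implementer (0x51 = Qualcomm), bits [15:4] part
	 * number; variant, architecture and revision are masked off */
	return (midr & 0xff00fff0) == ARM_CPU_PART_SCORPION; /* 0x510002d0 */
}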
diff --combined arch/x86/kernel/ftrace.c
index e1f2e7292a54aa707b04198acb06db7647729e64,8c43930ce1a7c0b02bff387709d15b1985262ca7..ab38db973c33b1eaa14fcdf38d7a47fae6fb2f35
@@@ -678,8 -678,11 +678,8 @@@ void arch_ftrace_update_code(int comman
        atomic_dec(&modifying_ftrace_code);
  }
  
 -int __init ftrace_dyn_arch_init(void *data)
 +int __init ftrace_dyn_arch_init(void)
  {
 -      /* The return code is retured via data */
 -      *(unsigned long *)data = 0;
 -
        return 0;
  }
  #endif
@@@ -741,6 -744,18 +741,18 @@@ void prepare_ftrace_return(unsigned lon
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;
  
+       /*
+        * When resuming from suspend-to-ram, this function can be indirectly
+        * called from early CPU startup code while the CPU is in real mode,
+        * which would fail miserably.  Make sure the stack pointer is a
+        * virtual address.
+        *
+        * This check isn't as accurate as virt_addr_valid(), but it should be
+        * good enough for this purpose, and it's fast.
+        */
+       if (unlikely((long)__builtin_frame_address(0) >= 0))
+               return;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
  
diff --combined block/genhd.c
index 57afc91c4a0503557d32eba2ccd9fd106ed7c60f,afd8206e530d79fd28d9eb82dc1fc44ce4c9efc3..d5ee936133d363a7ea31a428876dde4f2be64c40
@@@ -17,8 -17,6 +17,8 @@@
  #include <linux/kobj_map.h>
  #include <linux/mutex.h>
  #include <linux/idr.h>
 +#include <linux/ctype.h>
 +#include <linux/fs_uuid.h>
  #include <linux/log2.h>
  #include <linux/pm_runtime.h>
  
@@@ -664,7 -662,6 +664,6 @@@ void del_gendisk(struct gendisk *disk
  
        kobject_put(disk->part0.holder_dir);
        kobject_put(disk->slave_dir);
-       disk->driverfs_dev = NULL;
        if (!sysfs_deprecated)
                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
@@@ -1119,22 -1116,6 +1118,22 @@@ static void disk_release(struct device 
                blk_put_queue(disk->queue);
        kfree(disk);
  }
 +
 +static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
 +{
 +      struct gendisk *disk = dev_to_disk(dev);
 +      struct disk_part_iter piter;
 +      struct hd_struct *part;
 +      int cnt = 0;
 +
 +      disk_part_iter_init(&piter, disk, 0);
 +      while((part = disk_part_iter_next(&piter)))
 +              cnt++;
 +      disk_part_iter_exit(&piter);
 +      add_uevent_var(env, "NPARTS=%u", cnt);
 +      return 0;
 +}
 +
  struct class block_class = {
        .name           = "block",
  };
@@@ -1154,7 -1135,6 +1153,7 @@@ static struct device_type disk_type = 
        .groups         = disk_attr_groups,
        .release        = disk_release,
        .devnode        = block_devnode,
 +      .uevent         = disk_uevent,
  };
  
  #ifdef CONFIG_PROC_FS
@@@ -1405,87 -1385,6 +1404,87 @@@ int invalidate_partition(struct gendis
  
  EXPORT_SYMBOL(invalidate_partition);
  
 +dev_t blk_lookup_fs_info(struct fs_info *seek)
 +{
 +      dev_t devt = MKDEV(0, 0);
 +      struct class_dev_iter iter;
 +      struct device *dev;
 +      int best_score = 0;
 +
 +      class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 +      while (best_score < 3 && (dev = class_dev_iter_next(&iter))) {
 +              struct gendisk *disk = dev_to_disk(dev);
 +              struct disk_part_iter piter;
 +              struct hd_struct *part;
 +
 +              disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 +
 +              while (best_score < 3 && (part = disk_part_iter_next(&piter))) {
 +                      int score = part_matches_fs_info(part, seek);
 +                      if (score > best_score) {
 +                              devt = part_devt(part);
 +                              best_score = score;
 +                      }
 +              }
 +              disk_part_iter_exit(&piter);
 +      }
 +      class_dev_iter_exit(&iter);
 +      return devt;
 +}
 +EXPORT_SYMBOL_GPL(blk_lookup_fs_info);
 +
 +/* Caller uses NULL, key to start. For each match found, we return a bdev on
 + * which we have done blkdev_get, and we do the blkdev_put on block devices
 + * that are passed to us. When no more matches are found, we return NULL.
 + */
 +struct block_device *next_bdev_of_type(struct block_device *last,
 +      const char *key)
 +{
 +      dev_t devt = MKDEV(0, 0);
 +      struct class_dev_iter iter;
 +      struct device *dev;
 +      struct block_device *next = NULL, *bdev;
 +      int got_last = 0;
 +
 +      if (!key)
 +              goto out;
 +
 +      class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 +      while (!devt && (dev = class_dev_iter_next(&iter))) {
 +              struct gendisk *disk = dev_to_disk(dev);
 +              struct disk_part_iter piter;
 +              struct hd_struct *part;
 +
 +              disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 +
 +              while ((part = disk_part_iter_next(&piter))) {
 +                      bdev = bdget(part_devt(part));
 +                      if (last && !got_last) {
 +                              if (last == bdev)
 +                                      got_last = 1;
 +                              continue;
 +                      }
 +
 +                      if (blkdev_get(bdev, FMODE_READ, 0))
 +                              continue;
 +
 +                      if (bdev_matches_key(bdev, key)) {
 +                              next = bdev;
 +                              break;
 +                      }
 +
 +                      blkdev_put(bdev, FMODE_READ);
 +              }
 +              disk_part_iter_exit(&piter);
 +      }
 +      class_dev_iter_exit(&iter);
 +out:
 +      if (last)
 +              blkdev_put(last, FMODE_READ);
 +      return next;
 +}
 +EXPORT_SYMBOL_GPL(next_bdev_of_type);
 +
  /*
   * Disk events - monitor disk events like media change and eject request.
   */
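A hypothetical caller of next_bdev_of_type() following the NULL-to-NULL contract documented above (the "swap" key is illustrative; bdev_matches_key() comes from the same TuxOnIce patch set):

struct block_device *bdev = NULL;
char name[BDEVNAME_SIZE];

while ((bdev = next_bdev_of_type(bdev, "swap"))) {
	/* each bdev arrives opened via blkdev_get(); passing it back on
	 * the next iteration (or the final NULL return) releases it */
	pr_info("candidate: %s\n", bdevname(bdev, name));
}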
@@@ -1506,17 -1405,11 +1505,17 @@@ struct disk_events 
  static const char *disk_events_strs[] = {
        [ilog2(DISK_EVENT_MEDIA_CHANGE)]        = "media_change",
        [ilog2(DISK_EVENT_EJECT_REQUEST)]       = "eject_request",
 +#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT  
 +      [ilog2(DISK_EVENT_MEDIA_DISAPPEAR)]     = "media_disappear",
 +#endif
  };
  
  static char *disk_uevents[] = {
        [ilog2(DISK_EVENT_MEDIA_CHANGE)]        = "DISK_MEDIA_CHANGE=1",
        [ilog2(DISK_EVENT_EJECT_REQUEST)]       = "DISK_EJECT_REQUEST=1",
 +#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT  
 +      [ilog2(DISK_EVENT_MEDIA_DISAPPEAR)] = "DISK_EVENT_MEDIA_DISAPPEAR=1",   
 +#endif        
  };
  
  /* list of all disk_events */
@@@ -1524,10 -1417,7 +1523,10 @@@ static DEFINE_MUTEX(disk_events_mutex)
  static LIST_HEAD(disk_events);
  
  /* disable in-kernel polling by default */
 -static unsigned long disk_events_dfl_poll_msecs       = 0;
 +//ALPS00319570, CL955952 merged back, begin
 +//static unsigned long disk_events_dfl_poll_msecs     = 0;    //original
 +static unsigned long disk_events_dfl_poll_msecs       = 2000;
 +//ALPS00319570, CL955952 merged back, end
  
  static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
  {
diff --combined crypto/Makefile
index b8f90189c9c3e77064bc971c86cd2f2912eb65a9,139e7e0a06b47542ab0c3dd0c3f3c0022ceea12c..1b2f8a894de3ebf489e5dca5f20677a94cba9c0a
@@@ -52,6 -52,7 +52,7 @@@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_gener
  obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
  obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
  obj-$(CONFIG_CRYPTO_WP512) += wp512.o
+ CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
  obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
  obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
  obj-$(CONFIG_CRYPTO_ECB) += ecb.o
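($(call cc-option,<flag>) expands to the flag only when the compiler accepts it, so this per-file workaround for the gcc bug, like the -fsched-pressure one below, degrades gracefully on compilers that lack the option.)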
@@@ -72,6 -73,7 +73,7 @@@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) +
  obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
  obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
  obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
+ CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
  obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
  obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
  obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
@@@ -90,7 -92,6 +92,7 @@@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.
  obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
  obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
  obj-$(CONFIG_CRYPTO_LZO) += lzo.o
 +obj-$(CONFIG_CRYPTO_LZ4K) += lz4kc.o
  obj-$(CONFIG_CRYPTO_842) += 842.o
  obj-$(CONFIG_CRYPTO_RNG2) += rng.o
  obj-$(CONFIG_CRYPTO_RNG2) += krng.o
@@@ -107,4 -108,3 +109,4 @@@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) 
  obj-$(CONFIG_XOR_BLOCKS) += xor.o
  obj-$(CONFIG_ASYNC_CORE) += async_tx/
  obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
 +obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
diff --combined drivers/char/Kconfig
index 0519b3c5b8a47adeb6375b243f73f8b9cb0c98ba,8ccb96ae3fd9497805569ce1d4b1f69c852a966c..117a5dbcccc8ec9fa68783e449180e89ab2509e0
@@@ -6,19 -6,6 +6,19 @@@ menu "Character devices
  
  source "drivers/tty/Kconfig"
  
 +config DEVMEM
 +      bool "Memory device driver"
 +      default y
 +      help
 +        The memory driver provides two character devices, mem and kmem, which
 +        provide access to the system's memory. The mem device is a view of
 +        physical memory, and each byte in the device corresponds to the
 +        matching physical address. The kmem device is the same as mem, but
 +        the addresses correspond to the kernel's virtual address space rather
 +        than physical memory. These devices are standard parts of a Linux
 +        system and most users should say Y here. You might say N if you
 +        are very security conscious or if memory is tight.
 +
  config DEVKMEM
        bool "/dev/kmem virtual device support"
        default y
@@@ -592,14 -579,13 +592,17 @@@ config TELCLOC
          controlling the behavior of this hardware.
  
  config DEVPORT
-       bool
+       bool "/dev/port character device"
        depends on ISA || PCI
        default y
+       help
+         Say Y here if you want to support the /dev/port device. The /dev/port
+         device is similar to /dev/mem, but for I/O ports.
  
 +config DCC_TTY
 +      tristate "DCC tty driver"
 +      depends on ARM
 +
  source "drivers/s390/char/Kconfig"
  
  config MSM_SMD_PKT
diff --combined drivers/char/mem.c
index d370021c7022a54ed5f3c9d362406efc8870dc9e,40d2e99c6ba731ee54ee84ede062094549208b5e..1a62849e7f1d45afa3b63a5af1436de865a1608e
@@@ -60,8 -60,11 +60,12 @@@ static inline int valid_mmap_phys_addr_
  }
  #endif
  
 +#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
  #ifdef CONFIG_STRICT_DEVMEM
+ static inline int page_is_allowed(unsigned long pfn)
+ {
+       return devmem_is_allowed(pfn);
+ }
  static inline int range_is_allowed(unsigned long pfn, unsigned long size)
  {
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        return 1;
  }
  #else
+ static inline int page_is_allowed(unsigned long pfn)
+ {
+       return 1;
+ }
  static inline int range_is_allowed(unsigned long pfn, unsigned long size)
  {
        return 1;
  }
  #endif
 +#endif
  
 +#ifdef CONFIG_DEVMEM
  void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
  {
  }
@@@ -120,23 -125,31 +128,31 @@@ static ssize_t read_mem(struct file *fi
  
        while (count > 0) {
                unsigned long remaining;
+               int allowed;
  
                sz = size_inside_page(p, count);
  
-               if (!range_is_allowed(p >> PAGE_SHIFT, count))
+               allowed = page_is_allowed(p >> PAGE_SHIFT);
+               if (!allowed)
                        return -EPERM;
+               if (allowed == 2) {
+                       /* Show zeros for restricted memory. */
+                       remaining = clear_user(buf, sz);
+               } else {
+                       /*
+                        * On ia64 if a page has been mapped somewhere as
+                        * uncached, then it must also be accessed uncached
+                        * by the kernel or data corruption may occur.
+                        */
+                       ptr = xlate_dev_mem_ptr(p);
+                       if (!ptr)
+                               return -EFAULT;
  
-               /*
-                * On ia64 if a page has been mapped somewhere as uncached, then
-                * it must also be accessed uncached by the kernel or data
-                * corruption may occur.
-                */
-               ptr = xlate_dev_mem_ptr(p);
-               if (!ptr)
-                       return -EFAULT;
+                       remaining = copy_to_user(buf, ptr, sz);
+                       unxlate_dev_mem_ptr(p, ptr);
+               }
  
-               remaining = copy_to_user(buf, ptr, sz);
-               unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;
  
@@@ -176,30 -189,36 +192,36 @@@ static ssize_t write_mem(struct file *f
  #endif
  
        while (count > 0) {
+               int allowed;
                sz = size_inside_page(p, count);
  
-               if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+               allowed = page_is_allowed(p >> PAGE_SHIFT);
+               if (!allowed)
                        return -EPERM;
  
-               /*
-                * On ia64 if a page has been mapped somewhere as uncached, then
-                * it must also be accessed uncached by the kernel or data
-                * corruption may occur.
-                */
-               ptr = xlate_dev_mem_ptr(p);
-               if (!ptr) {
-                       if (written)
-                               break;
-                       return -EFAULT;
-               }
-               copied = copy_from_user(ptr, buf, sz);
-               unxlate_dev_mem_ptr(p, ptr);
-               if (copied) {
-                       written += sz - copied;
-                       if (written)
-                               break;
-                       return -EFAULT;
+               /* Skip actual writing when a page is marked as restricted. */
+               if (allowed == 1) {
+                       /*
+                        * On ia64 if a page has been mapped somewhere as
+                        * uncached, then it must also be accessed uncached
+                        * by the kernel or data corruption may occur.
+                        */
+                       ptr = xlate_dev_mem_ptr(p);
+                       if (!ptr) {
+                               if (written)
+                                       break;
+                               return -EFAULT;
+                       }
+                       copied = copy_from_user(ptr, buf, sz);
+                       unxlate_dev_mem_ptr(p, ptr);
+                       if (copied) {
+                               written += sz - copied;
+                               if (written)
+                                       break;
+                               return -EFAULT;
+                       }
                }
  
                buf += sz;
        *ppos += written;
        return written;
  }
 +#endif        /* CONFIG_DEVMEM */
 +
 +#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
  
  int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@@ -335,7 -351,6 +357,7 @@@ static int mmap_mem(struct file *file, 
        }
        return 0;
  }
 +#endif        /* CONFIG_DEVMEM */
  
  #ifdef CONFIG_DEVKMEM
  static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@@ -730,8 -745,6 +752,8 @@@ static loff_t null_lseek(struct file *f
        return file->f_pos = 0;
  }
  
 +#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
 +
  /*
   * The memory devices use the full 32/64 bits of the offset, and so we cannot
   * check against negative addresses: they are ok. The return value is weird,
@@@ -765,14 -778,10 +787,14 @@@ static loff_t memory_lseek(struct file 
        return ret;
  }
  
 +#endif
 +
 +#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
  static int open_port(struct inode *inode, struct file *filp)
  {
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
  }
 +#endif
  
  #define zero_lseek    null_lseek
  #define full_lseek      null_lseek
  #define open_kmem     open_mem
  #define open_oldmem   open_mem
  
 +#ifdef CONFIG_DEVMEM
  static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
  };
 +#endif
  
  #ifdef CONFIG_DEVKMEM
  static const struct file_operations kmem_fops = {
@@@ -862,9 -869,7 +884,9 @@@ static const struct memdev 
        const struct file_operations *fops;
        struct backing_dev_info *dev_info;
  } devlist[] = {
 +#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
 +#endif
  #ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
  #endif
diff --combined drivers/cpufreq/cpufreq.c
index 016294db2a795079ac1b7a819d4d37a311f140fe,d85c4180095297a66acc406bb9df3c9f17d06b26..b9174608c6871964e6d3a439350866f1f42c7b2b
@@@ -17,9 -17,7 +17,9 @@@
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
 +#include <asm/cputime.h>
  #include <linux/kernel.h>
 +#include <linux/kernel_stat.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/notifier.h>
@@@ -27,7 -25,6 +27,7 @@@
  #include <linux/delay.h>
  #include <linux/interrupt.h>
  #include <linux/spinlock.h>
 +#include <linux/tick.h>
  #include <linux/device.h>
  #include <linux/slab.h>
  #include <linux/cpu.h>
@@@ -136,51 -133,6 +136,51 @@@ bool have_governor_per_policy(void
  {
        return cpufreq_driver->have_governor_per_policy;
  }
 +EXPORT_SYMBOL_GPL(have_governor_per_policy);
 +
 +struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 +{
 +      if (have_governor_per_policy())
 +              return &policy->kobj;
 +      else
 +              return cpufreq_global_kobject;
 +}
 +EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 +
 +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 +{
 +      u64 idle_time;
 +      u64 cur_wall_time;
 +      u64 busy_time;
 +
 +      cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
 +
 +      busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
 +      busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
 +      busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
 +      busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
 +      busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
 +      busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 +
 +      idle_time = cur_wall_time - busy_time;
 +      if (wall)
 +              *wall = cputime_to_usecs(cur_wall_time);
 +
 +      return cputime_to_usecs(idle_time);
 +}
 +
 +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 +{
 +      u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
 +
 +      if (idle_time == -1ULL)
 +              return get_cpu_idle_time_jiffy(cpu, wall);
 +      else if (!io_busy)
 +              idle_time += get_cpu_iowait_time_us(cpu, wall);
 +
 +      return idle_time;
 +}
 +EXPORT_SYMBOL_GPL(get_cpu_idle_time);
  
  static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
  {
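A hypothetical governor-style use of the newly exported helper: sample idle and wall time twice and derive a busy percentage from the deltas (every name other than get_cpu_idle_time() is illustrative):

u64 idle0, wall0, idle1, wall1, busy_pct;

idle0 = get_cpu_idle_time(cpu, &wall0, 0);
/* ... one sampling period later ... */
idle1 = get_cpu_idle_time(cpu, &wall1, 0);
busy_pct = 100 * ((wall1 - wall0) - (idle1 - idle0)) / (wall1 - wall0);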
@@@ -485,9 -437,11 +485,11 @@@ static ssize_t show_cpuinfo_cur_freq(st
                                        char *buf)
  {
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
-       if (!cur_freq)
-               return sprintf(buf, "<unknown>");
-       return sprintf(buf, "%u\n", cur_freq);
+       if (cur_freq)
+               return sprintf(buf, "%u\n", cur_freq);
+       return sprintf(buf, "<unknown>\n");
  }
  
  
diff --combined drivers/mtd/ubi/upd.c
index 1f8bb16e39674e115bd0c663f7a51eff7ca1d2fb,39712560b4c1b55aa847f0ac69f6815edb11e678..35b12e9c2f9bbde347f2cde0fae41aaad72257d3
@@@ -148,11 -148,11 +148,11 @@@ int ubi_start_update(struct ubi_device 
                        return err;
        }
  
-       if (bytes == 0) {
-               err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
-               if (err)
-                       return err;
+       err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+       if (err)
+               return err;
  
+       if (bytes == 0) {
                err = clear_update_marker(ubi, vol, 0);
                if (err)
                        return err;
@@@ -370,7 -370,7 +370,7 @@@ int ubi_more_update_data(struct ubi_dev
                        return err;
                vol->updating = 0;
                err = to_write;
 -              vfree(vol->upd_buf);
 +              kfree(vol->upd_buf);
        }
  
        return err;
@@@ -426,7 -426,7 +426,7 @@@ int ubi_more_leb_change_data(struct ubi
        if (vol->upd_received == vol->upd_bytes) {
                vol->changing_leb = 0;
                err = count;
 -              vfree(vol->upd_buf);
 +              kfree(vol->upd_buf);
        }
  
        return err;
diff --combined drivers/net/tun.c
index 21ec9ae8967e557d53382979476583587db5ef95,7bbc43fbb7200149716fb61d871f3a494d635c27..d818f990c7ad66052bc38fcf43f4585e2fd084db
@@@ -1087,9 -1087,11 +1087,11 @@@ static ssize_t tun_get_user(struct tun_
        }
  
        if (tun->flags & TUN_VNET_HDR) {
-               if (len < tun->vnet_hdr_sz)
+               int vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);
+               if (len < vnet_hdr_sz)
                        return -EINVAL;
-               len -= tun->vnet_hdr_sz;
+               len -= vnet_hdr_sz;
  
                if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
                        return -EFAULT;
  
                if (gso.hdr_len > len)
                        return -EINVAL;
-               offset += tun->vnet_hdr_sz;
+               offset += vnet_hdr_sz;
        }
  
        if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@@ -1275,7 -1277,7 +1277,7 @@@ static ssize_t tun_put_user(struct tun_
        int vnet_hdr_sz = 0;
  
        if (tun->flags & TUN_VNET_HDR)
-               vnet_hdr_sz = tun->vnet_hdr_sz;
+               vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);
  
        if (!(tun->flags & TUN_NO_PI)) {
                if ((len -= sizeof(pi)) < 0)
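The race being fixed, reduced to a sketch: tun->vnet_hdr_sz can change concurrently via the TUNSETVNETHDRSZ ioctl, so it must be read exactly once and the same snapshot used for both the bounds check and the arithmetic:

int vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);  /* single snapshot */

if (len < vnet_hdr_sz)   /* validate the snapshot ...          */
	return -EINVAL;
len -= vnet_hdr_sz;      /* ... and reuse it, never re-reading */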
@@@ -1902,12 -1904,6 +1904,12 @@@ static long __tun_chr_ioctl(struct fil
        int vnet_hdr_sz;
        int ret;
  
 +#ifdef CONFIG_ANDROID_PARANOID_NETWORK
 +      if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
 +              return -EPERM;
 +      }
 +#endif
 +
        if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
                if (copy_from_user(&ifr, argp, ifreq_len))
                        return -EFAULT;
diff --combined drivers/rtc/interface.c
index 352e95da90d68cf5eb13da94e57128317e8bfa57,09198941ee2249611f293474d8d9b313cb8f7e47..8cfd20f79da86442baafb94ab16bf4e574d2b8ef
@@@ -303,9 -303,6 +303,9 @@@ done
  
  int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
  {
 +#ifdef RTC_LEGACY_ALARM_IMPL
 +      return __rtc_read_alarm(rtc, alarm);
 +#else 
        int err;
  
        err = mutex_lock_interruptible(&rtc->ops_lock);
        mutex_unlock(&rtc->ops_lock);
  
        return err;
 +#endif
  }
  EXPORT_SYMBOL_GPL(rtc_read_alarm);
  
  static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
  {
 +#ifdef RTC_LEGACY_ALARM_IMPL
 +      WARN(1, "__rtc_set_alarm() is not supported!!\n");
 +      return -EPERM;
 +#else 
        struct rtc_time tm;
        long now, scheduled;
        int err;
                err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
  
        return err;
 +#endif
  }
  
  int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;
 +#ifdef RTC_LEGACY_ALARM_IMPL
 +      if (!rtc->ops)
 +              err = -ENODEV;
 +      else if (!rtc->ops->set_alarm)
 +              err = -EINVAL;
 +      else
 +              err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
 +#else
 +
        if (rtc->aie_timer.enabled) {
                rtc_timer_remove(rtc, &rtc->aie_timer);
        }
        if (alarm->enabled) {
                err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
        }
 +#endif
        mutex_unlock(&rtc->ops_lock);
        return err;
  }
  EXPORT_SYMBOL_GPL(rtc_set_alarm);
  
 +int rtc_set_alarm_poweron(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 +{
 +      int err;
 +
 +      err = rtc_valid_tm(&alarm->time);
 +      if (err != 0)
 +              return err;
 +
 +      err = mutex_lock_interruptible(&rtc->ops_lock);
 +      if (err)
 +              return err;
 +              
 +      if (!rtc->ops)
 +              err = -ENODEV;
 +      else if (!rtc->ops->set_alarm)
 +              err = -EINVAL;
 +      else
 +              err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
 +      
 +      mutex_unlock(&rtc->ops_lock);
 +      return err;     
 +}
 +EXPORT_SYMBOL_GPL(rtc_set_alarm_poweron);
 +
  /* Called once per device from rtc_device_register */
  int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
  {
 +#ifdef RTC_LEGACY_ALARM_IMPL
 +                      return 0;
 +#else
        int err;
        struct rtc_time now;
  
        }
        mutex_unlock(&rtc->ops_lock);
        return err;
 +#endif
  }
  EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
  
@@@ -810,9 -763,23 +810,23 @@@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq)
   */
  static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
  {
+       struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+       struct rtc_time tm;
+       ktime_t now;
        timer->enabled = 1;
+       __rtc_read_time(rtc, &tm);
+       now = rtc_tm_to_ktime(tm);
+       /* Skip over expired timers */
+       while (next) {
+               if (next->expires.tv64 >= now.tv64)
+                       break;
+               next = timerqueue_iterate_next(next);
+       }
        timerqueue_add(&rtc->timerqueue, &timer->node);
-       if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+       if (!next) {
                struct rtc_wkalrm alarm;
                int err;
                alarm.time = rtc_ktime_to_tm(timer->node.expires);
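Skipping already-expired entries keeps a stale timer from becoming the queue head; otherwise the hardware alarm could be programmed with a time in the past and never fire, which is the failure mode this backported fix addresses.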
diff --combined drivers/scsi/sd.c
index 1fd54406a8984a8b6e51506c25f1ca7986bae8c7,880a300baf7a9abb14c87eaa4f160f5b86f84bff..3d4a2eff5b0cdf9e7b1ca44a91d945a85b7dfcdc
@@@ -1354,11 -1354,15 +1354,15 @@@ static int media_not_present(struct scs
   **/
  static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
  {
-       struct scsi_disk *sdkp = scsi_disk(disk);
-       struct scsi_device *sdp = sdkp->device;
+       struct scsi_disk *sdkp = scsi_disk_get(disk);
+       struct scsi_device *sdp;
        struct scsi_sense_hdr *sshdr = NULL;
        int retval;
  
+       if (!sdkp)
+               return 0;
+       sdp = sdkp->device;
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
  
        /*
         */
        kfree(sshdr);
        retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
 +#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
 +      //add for sdcard hotplug start
 +      if(1 == retval){
 +              if(sdkp->old_media_present != sdkp->media_present){
 +                      retval  |=  sdkp->media_present ? 0 : DISK_EVENT_MEDIA_DISAPPEAR;
 +                      sdkp->old_media_present = sdkp->media_present;
 +              }
 +      }
 +      //add for sdcard hotplug end    
 +#endif
        sdp->changed = 0;
+       scsi_disk_put(sdkp);
        return retval;
  }
  
@@@ -1764,22 -1759,16 +1769,22 @@@ sd_spinup_disk(struct scsi_disk *sdkp
                         * doesn't have any media in it, don't bother
                         * with any more polling.
                         */
 +                      /*
                        if (media_not_present(sdkp, &sshdr))
                                return;
 +                       */
  
                        if (the_result)
                                sense_valid = scsi_sense_valid(&sshdr);
                        retries++;
 -              } while (retries < 3 && 
 -                       (!scsi_status_is_good(the_result) ||
 +                      if(!scsi_status_is_good(the_result) ||
                          ((driver_byte(the_result) & DRIVER_SENSE) &&
 -                        sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
 +                        sense_valid && sshdr.sense_key == UNIT_ATTENTION)) {
 +                              msleep(100);
 +                      } else {
 +                              break;
 +                      }
 +              } while (retries < 5);
  
                if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
                        /* no sense, TUR either succeeded or failed
@@@ -1935,6 -1924,22 +1940,22 @@@ static void read_capacity_error(struct 
  
  #define READ_CAPACITY_RETRIES_ON_RESET        10
  
+ /*
+  * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+  * and the reported logical block size is bigger than 512 bytes. Note
+  * that last_sector is a u64 and therefore logical_to_sectors() is not
+  * applicable.
+  */
+ static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+ {
+       u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+       if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+               return false;
+       return true;
+ }
  static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                                                unsigned char *buffer)
  {
                return -ENODEV;
        }
  
-       if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+       if (!sd_addressable_capacity(lba, sector_size)) {
                sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
                        "kernel compiled with support for large block "
                        "devices.\n");
@@@ -2086,7 -2091,7 +2107,7 @@@ static int read_capacity_10(struct scsi
                return sector_size;
        }
  
-       if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+       if (!sd_addressable_capacity(lba, sector_size)) {
                sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
                        "kernel compiled with support for large block "
                        "devices.\n");
@@@ -2857,9 -2862,6 +2878,9 @@@ static void sd_probe_async(void *data, 
        sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
  
        sd_revalidate_disk(gd);
 +#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT  
 +      sdkp->old_media_present = sdkp->media_present; //add for sdcard hotplug
 +#endif
  
        blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
        blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);
        gd->flags = GENHD_FL_EXT_DEVT;
        if (sdp->removable) {
                gd->flags |= GENHD_FL_REMOVABLE;
 +#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT          
 +              gd->events |= DISK_EVENT_MEDIA_CHANGE|DISK_EVENT_MEDIA_DISAPPEAR; //add for sdcard hotplug
 +#else
                gd->events |= DISK_EVENT_MEDIA_CHANGE;
 +#endif                
        }
  
        blk_pm_runtime_init(sdp->request_queue, dev);
diff --combined drivers/usb/class/cdc-acm.c
index 5e45a5e55a5329326a0e017de56b9bc2b2e3a3d3,802df033e24cdfcca6bcbc13e9960306709a2b85..d98a0249acc94e9b0038f6019c480e99d42a1662
  
  #include "cdc-acm.h"
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +
 +/* adjust to 1 to avoid a musb bug seen when doing many writes with a clean urb */
 +#undef ACM_NW 
 +#define ACM_NW 1
 +/* adjust SZ to hsmaxp*20 to prevent tty disc don't accept big size write */
 +#define ACM_WB_SZ (512*20)
 +
 +#define DATA_DUMP_BYTES 15 // wx, how many bytes we'll print out for each packet
 +#define DATA_DUMP_SIZE 64 // wx, should be large enough to hold DATA_DUMP_BYTES hex entries
 +static unsigned char data_out[DATA_DUMP_SIZE];
 +static unsigned char data_in[DATA_DUMP_SIZE];
 +/* Debug functions */
 +static int enable_debug = 0;
 +static int enable_dump = 0;
 +//original CDC-ACM log, more detail
 +#undef dev_dbg
 +#define dev_dbg(dev, format, args...)  \
 +      do{ \
 +              if(enable_debug) { \
 +                      dev_printk(KERN_WARNING, dev, "[CDC-ACM] " format, ##args); \
 +              } \
 +      }while(0)
 +#undef dev_vdbg
 +#define dev_vdbg  dev_dbg
 +//MTK added CDC-ACM log, more critical
 +#define dbg_mtk(dev, format, args...)  \
 +      do{ \
 +              dev_printk(KERN_WARNING, dev, "[CDC-ACM-MTK] " format "\n", ##args); \
 +      }while(0)
 +#else
 +#define dbg_mtk(dev, format, args...) do{}while(0)
 +#endif
 +
  
  #define DRIVER_AUTHOR "Armin Fuerst, Pavel Machek, Johannes Erdfelt, Vojtech Pavlik, David Kubicek, Johan Hovold"
  #define DRIVER_DESC "USB Abstract Control Model driver for USB modems and ISDN adapters"
@@@ -94,167 -60,6 +94,167 @@@ static struct acm *acm_table[ACM_TTY_MI
  
  static DEFINE_MUTEX(acm_table_lock);
  
 +#define MYDBG(fmt, args...) do {printk(KERN_WARNING "MTK_ACM, <%s(), %d> " fmt, __func__, __LINE__, ## args); }while(0)
 +
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +
 +#define RECORD_SZ 100
 +#define DUMP_DELAY 3
 +struct record_entry
 +{
 +      struct timeval cur_time;
 +      u32      transfer_buffer_length;
 +      u32 actual_length;
 +      int status;
 +      unsigned char data;
 +
 +}w1[RECORD_SZ], w2[RECORD_SZ], r1[RECORD_SZ], r2[RECORD_SZ];
 +
 +static int w1_idx, w2_idx, r1_idx, r2_idx;
 +static int record_enable = 0;
 +static struct timeval tv_start; 
 +void dump_record(void)
 +{
 +      int i, index_limit;
 +      struct record_entry *ptr;
 +
 +      index_limit = w1_idx;
 +      ptr = w1;
 +      for(i = 0; i < index_limit; i++){
 +              MYDBG("w1, time:(%d,%d), reqlen:%d, data:%x\n", 
 +                              (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, (unsigned int)ptr[i].transfer_buffer_length, ptr[i].data);
 +              mdelay(DUMP_DELAY);
 +      }
 +      
 +      index_limit = r1_idx;
 +      ptr = r1;
 +      for(i = 0; i < index_limit; i++){
 +              MYDBG("r1, time:(%d,%d), reqlen:%d\n", 
 +                              (unsigned int)ptr[i].cur_time.tv_sec,  (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length);
 +              mdelay(DUMP_DELAY);
 +      }
 +      
 +      index_limit = w2_idx;
 +      ptr = w2;
 +      for(i = 0; i < index_limit; i++){
 +              MYDBG("w2, time:(%d,%d), reqlen:%d, actlen:%d, status:%d, data:%x\n",  
 +                              (unsigned int)ptr[i].cur_time.tv_sec,  (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length, ptr[i].actual_length, ptr[i].status, ptr[i].data);
 +              mdelay(DUMP_DELAY);
 +      }
 +      
 +      index_limit = r2_idx;
 +      ptr = r2;
 +      for(i = 0; i < index_limit; i++){
 +              MYDBG("r2, time:(%d,%d), reqlen:%d, actlen:%d, status:%d, data:%x\n",  
 +                              (unsigned int)ptr[i].cur_time.tv_sec,  (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length, ptr[i].actual_length, ptr[i].status, ptr[i].data);
 +              mdelay(DUMP_DELAY);
 +      }
 +
 +}
 +void record_activity(struct urb *urb, int is_in, int is_complete)
 +{
 +      struct timeval tv_time;
 +
 +      if(!record_enable)
 +              return;
 +
 +      do_gettimeofday(&tv_time);
 +      tv_time.tv_sec = tv_time.tv_sec - tv_start.tv_sec;
 +      tv_time.tv_usec = tv_time.tv_usec - tv_start.tv_usec;
 +      if(is_in){
 +              if(is_complete){
 +                      if(r2_idx >= RECORD_SZ)
 +                              return;
 +
 +                      r2[r2_idx].cur_time = tv_time;
 +                      r2[r2_idx].transfer_buffer_length = urb->transfer_buffer_length;
 +                      r2[r2_idx].actual_length = urb->actual_length;
 +                      r2[r2_idx].status = urb->status;
 +                      r2[r2_idx].data = *((unsigned char*)urb->transfer_buffer);
 +                      r2_idx++;
 +                      return;
 +              }
 +              if(r1_idx >= RECORD_SZ)
 +                      return;
 +              r1[r1_idx].cur_time = tv_time;
 +              r1[r1_idx].transfer_buffer_length = urb->transfer_buffer_length;
 +              r1[r1_idx].actual_length = urb->actual_length;
 +              r1[r1_idx].status = urb->status;
 +              r1_idx++;
 +      }else{
 +              if(is_complete){
 +                      if(w2_idx >= RECORD_SZ)
 +                              return;
 +                      w2[w2_idx].cur_time = tv_time;
 +                      w2[w2_idx].transfer_buffer_length = urb->transfer_buffer_length;
 +                      w2[w2_idx].actual_length = urb->actual_length;
 +                      w2[w2_idx].status = urb->status;
 +                      w2[w2_idx].data = *((unsigned char*)urb->transfer_buffer);
 +                      w2_idx++;
 +                      return;
 +              }
 +              if(w1_idx >= RECORD_SZ)
 +                      return;
 +              w1[w1_idx].cur_time = tv_time;
 +              w1[w1_idx].transfer_buffer_length = urb->transfer_buffer_length;
 +              w1[w1_idx].actual_length = urb->actual_length;
 +              w1[w1_idx].status = urb->status;
 +              w1[w1_idx].data = *((unsigned char*)urb->transfer_buffer);
 +              w1_idx++;
 +      }
 +}     
 +
 +bool usb_h_acm_all_clear(void)
 +{
 +      int i;
 +      int count = 0;
 +      for (i = 0; i < ACM_TTY_MINORS; i++) {
 +              if(acm_table[i] != NULL) {
 +                      count++;
 +              }
 +      }
 +      MYDBG("count<%d>\n", count); 
 +      return !count;
 +}
 +EXPORT_SYMBOL_GPL(usb_h_acm_all_clear);
 +
 +#define CHECK_INTERVAL 2
 +#define CB_NUM 3
 +extern unsigned long volatile jiffies;
 +static unsigned long callback_check_timeout[CB_NUM];
 +static char *callback_name[CB_NUM] = {
 +      "acm_read_bulk_callback",
 +      "acm_write_bulk",
 +      "acm_ctrl_irq",
 +};
 +void mark_callback_alive(char *func_name, struct urb *urb, struct acm *acm)
 +{
 +
 +
 +      int i;
 +      for(i = 0; i < CB_NUM ; i++)
 +      {
 +              if(!strcmp(func_name, callback_name[i])){
 +                      if(enable_debug || time_after(jiffies, callback_check_timeout[i]))
 +                      {
 +                              MYDBG("%s,ep(%d),len(%d,%d),data(%x),sts(%d), minor(%d)\n", 
 +                                              func_name,
 +                                              urb->ep->desc.bEndpointAddress,         
 +                                              urb->actual_length, 
 +                                              urb->transfer_buffer_length, 
 +                                              *((unsigned char*)urb->transfer_buffer), 
 +                                              urb->status,
 +                                              acm->minor);
 +                              callback_check_timeout[i] = jiffies + HZ * CHECK_INTERVAL;
 +                      }
 +                      break;
 +              }
 +      }
 +}
 +
 +
 +#endif
 +
  /*
   * acm_table accessors
   */
@@@ -411,9 -216,6 +411,9 @@@ static int acm_start_wb(struct acm *acm
        wb->urb->transfer_buffer_length = wb->len;
        wb->urb->dev = acm->dev;
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      record_activity(wb->urb, 0, 0);
 +#endif
        rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
        if (rc < 0) {
                dev_err(&acm->data->dev,
@@@ -510,21 -312,18 +510,21 @@@ static void acm_ctrl_irq(struct urb *ur
        case -ENOENT:
        case -ESHUTDOWN:
                /* this urb is terminated, clean up */
 -              dev_dbg(&acm->control->dev,
 +              dev_err(&acm->control->dev,
                                "%s - urb shutting down with status: %d\n",
                                __func__, status);
                return;
        default:
 -              dev_dbg(&acm->control->dev,
 +              dev_err(&acm->control->dev,
                                "%s - nonzero urb status received: %d\n",
                                __func__, status);
                goto exit;
        }
  
        usb_mark_last_busy(acm->dev);
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      mark_callback_alive(__func__, urb, acm);
 +#endif
  
        data = (unsigned char *)(dr + 1);
        switch (dr->bNotificationType) {
@@@ -577,19 -376,13 +577,19 @@@ static int acm_submit_read_urb(struct a
  {
        int res;
  
 -      if (!test_and_clear_bit(index, &acm->read_urbs_free))
 +      if (!test_and_clear_bit(index, &acm->read_urbs_free)){
                return 0;
 +      }
  
        dev_vdbg(&acm->data->dev, "%s - urb %d\n", __func__, index);
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      record_activity(acm->read_urbs[index], 1, 0);
 +#endif
 +
        res = usb_submit_urb(acm->read_urbs[index], mem_flags);
        if (res) {
 +              MYDBG("urb fail(%d)\n", res);
                if (res != -EPERM) {
                        dev_err(&acm->data->dev,
                                        "%s - usb_submit_urb failed: %d\n",
@@@ -609,9 -402,8 +609,9 @@@ static int acm_submit_read_urbs(struct 
  
        for (i = 0; i < acm->rx_buflimit; ++i) {
                res = acm_submit_read_urb(acm, i, mem_flags);
 -              if (res)
 +              if (res){
                        return res;
 +              }
        }
  
        return 0;
  
  static void acm_process_read_urb(struct acm *acm, struct urb *urb)
  {
 -      if (!urb->actual_length)
 +
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      int i, len;
 +#endif
 +      if (!urb->actual_length){
                return;
 +      }
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      if(enable_dump) {
 +              len = sprintf(data_in, "DT-I: ");
 +              for(i=0; i<urb->actual_length && i<DATA_DUMP_BYTES; i++) {
 +                      len += sprintf(data_in+len, "%02X ", *(((unsigned char *)(urb->transfer_buffer))+i));
 +              }
 +              sprintf(data_in+len, "\n");
 +              dbg_mtk(&acm->data->dev, "%s", data_in);
 +      }
 +#endif
  
        tty_insert_flip_string(&acm->port, urb->transfer_buffer,
                        urb->actual_length);
@@@ -652,10 -429,6 +652,10 @@@ static void acm_read_bulk_callback(stru
                                        rb->index, urb->actual_length);
        set_bit(rb->index, &acm->read_urbs_free);
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      record_activity(urb, 1, 1);
 +#endif
 +
        if (!acm->dev) {
                dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
                return;
        usb_mark_last_busy(acm->dev);
  
        if (urb->status) {
 -              dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
 +              dev_err(&acm->data->dev, "%s - non-zero urb status: %d\n",
                                                        __func__, urb->status);
                return;
        }
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      mark_callback_alive(__func__, urb, acm);
 +#endif
        acm_process_read_urb(acm, urb);
  
        /* throttle device if requested by tty */
@@@ -690,22 -460,13 +690,22 @@@ static void acm_write_bulk(struct urb *
        struct acm *acm = wb->instance;
        unsigned long flags;
  
 -      if (urb->status || (urb->actual_length != urb->transfer_buffer_length))
 -              dev_vdbg(&acm->data->dev, "%s - len %d/%d, status %d\n",
 +
 +      if (urb->status || (urb->actual_length != urb->transfer_buffer_length)){
 +              dev_err(&acm->data->dev, "%s - len %d/%d, status %d, data(%x)\n",
                        __func__,
                        urb->actual_length,
                        urb->transfer_buffer_length,
 -                      urb->status);
 +                      urb->status,
 +                      *((char *)(urb->transfer_buffer)));
 +      }
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      record_activity(urb, 0, 1);
 +#endif
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      mark_callback_alive(__func__, urb, acm);
 +#endif
        spin_lock_irqsave(&acm->write_lock, flags);
        acm_write_done(acm, wb);
        spin_unlock_irqrestore(&acm->write_lock, flags);
@@@ -749,46 -510,10 +749,46 @@@ error_init_termios
        return retval;
  }
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +extern struct usb_device *get_usb11_child_udev(void);
 +extern int usb_autoresume_device(struct usb_device *udev);
 +#endif
  static int acm_tty_open(struct tty_struct *tty, struct file *filp)
  {
 -      struct acm *acm = tty->driver_data;
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      struct usb_device *udev;
 +      int result;
 +#endif
 +      struct acm *acm;
 +      acm = tty->driver_data;
 +
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      dbg_mtk(&acm->control->dev, "%s port_cnt=%d", __func__, acm->port.count);
 +      MYDBG("ctrl:%x, read:%x, write:%x\n",
 +                      (acm->control->cur_altsetting->endpoint[0].desc).bEndpointAddress,
 +                      (acm->data->cur_altsetting->endpoint[0].desc).bEndpointAddress,
 +                      (acm->data->cur_altsetting->endpoint[1].desc).bEndpointAddress);
 +
  
 +#define META_BIN_NAME "meta_tst"
 +#define MDDOWNLOADER_BIN_NAME "downloader"
 +
 +      /* make sure usb1 is always alive in dl/meta mode */
 +      if(!strcmp(META_BIN_NAME, current->comm) || !strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
 +              udev = get_usb11_child_udev();
 +              result = usb_autoresume_device(udev);
 +              dbg_mtk(&acm->control->dev, "%s, auto result:%d", __func__, result);
 +      }
 +
 +      if(!strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
 +              record_enable = 1;      
 +              w1_idx = w2_idx = r1_idx = r2_idx = 0;
 +              do_gettimeofday(&tv_start);
 +      }
 +
 +#else
 +      dev_dbg(&acm->control->dev, "%s\n", __func__);
 +#endif
        dev_dbg(tty->dev, "%s\n", __func__);
  
        return tty_port_open(&acm->port, tty, filp);
@@@ -816,26 -541,20 +816,25 @@@ static int acm_port_activate(struct tty
         */
        set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
        acm->control->needs_remote_wakeup = 1;
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +#ifdef        CONFIG_PM_RUNTIME
 +      acm->control->needs_remote_wakeup = 0;          
 +#endif
 +#endif
  
        acm->ctrlurb->dev = acm->dev;
-       if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
+       retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL);
+       if (retval) {
                dev_err(&acm->control->dev,
                        "%s - usb_submit_urb(ctrl irq) failed\n", __func__);
                goto error_submit_urb;
        }
  
        acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
-       if (acm_set_control(acm, acm->ctrlout) < 0 &&
-           (acm->ctrl_caps & USB_CDC_CAP_LINE))
+       retval = acm_set_control(acm, acm->ctrlout);
+       if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE))
                goto error_set_control;
  
-       usb_autopm_put_interface(acm->control);
        /*
         * Unthrottle device in case the TTY was closed while throttled.
         */
        acm->throttle_req = 0;
        spin_unlock_irq(&acm->read_lock);
  
-       if (acm_submit_read_urbs(acm, GFP_KERNEL))
+       retval = acm_submit_read_urbs(acm, GFP_KERNEL);
+       if (retval)
                goto error_submit_read_urbs;
  
+       usb_autopm_put_interface(acm->control);
        mutex_unlock(&acm->mutex);
  
        return 0;
@@@ -863,7 -585,8 +865,8 @@@ error_submit_urb
  error_get_interface:
  disconnected:
        mutex_unlock(&acm->mutex);
-       return retval;
+       return usb_translate_errors(retval);
  }
  
  static void acm_port_destruct(struct tty_port *port)
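usb_translate_errors() passes through 0, -ENOMEM, -ENODEV and -EOPNOTSUPP and collapses every other USB-internal status code to -EIO, so tty callers of the activate path see conventional error values rather than USB internals.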
@@@ -932,13 -655,6 +935,13 @@@ static void acm_tty_close(struct tty_st
  {
        struct acm *acm = tty->driver_data;
        dev_dbg(&acm->control->dev, "%s\n", __func__);
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      dbg_mtk(&acm->control->dev, "%s port_cnt=%d", __func__, acm->port.count);
 +      if(!strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
 +              record_enable = 0;
 +              dump_record();
 +      }
 +#endif
        tty_port_close(&acm->port, tty, filp);
  }
  
@@@ -950,25 -666,12 +953,25 @@@ static int acm_tty_write(struct tty_str
        unsigned long flags;
        int wbn;
        struct acm_wb *wb;
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      int i, len;
 +#endif
  
        if (!count)
                return 0;
  
        dev_vdbg(&acm->data->dev, "%s - count %d\n", __func__, count);
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      if(enable_dump) {
 +              len = sprintf(data_out, "DT-O: ");
 +              for(i=0; i<count && i<DATA_DUMP_BYTES; i++) {
 +                      len += sprintf(data_out+len, "%02X ", *(buf+i));
 +              }
 +              sprintf(data_out+len, "\n");
 +              dbg_mtk(&acm->data->dev, "%s", data_out);
 +      }
 +#endif
        spin_lock_irqsave(&acm->write_lock, flags);
        wbn = acm_wb_alloc(acm);
        if (wbn < 0) {
@@@ -1054,11 -757,6 +1057,11 @@@ static int acm_tty_tiocmget(struct tty_
  {
        struct acm *acm = tty->driver_data;
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      if(enable_dump) {
 +              dbg_mtk(&acm->control->dev, "tiocmget ctrlin=%x\n", acm->ctrlin);
 +      }
 +#endif
        return (acm->ctrlout & ACM_CTRL_DTR ? TIOCM_DTR : 0) |
               (acm->ctrlout & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
               (acm->ctrlin  & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
@@@ -1073,11 -771,6 +1076,11 @@@ static int acm_tty_tiocmset(struct tty_
        struct acm *acm = tty->driver_data;
        unsigned int newctrl;
  
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      if(enable_dump) {
 +              dbg_mtk(&acm->control->dev, "tiocmset ctrlout=%x\n", acm->ctrlout);
 +      }
 +#endif
        newctrl = acm->ctrlout;
        set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
                                        (set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
@@@ -1508,11 -1201,7 +1511,11 @@@ made_compressed_probe
        readsize = usb_endpoint_maxp(epread) *
                                (quirks == SINGLE_RX_URB ? 1 : 2);
        acm->combined_interfaces = combined_interfaces;
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      acm->writesize = ACM_WB_SZ;
 +#else 
        acm->writesize = usb_endpoint_maxp(epwrite) * 20;
 +#endif
        acm->control = control_interface;
        acm->data = data_interface;
        acm->minor = minor;
@@@ -1778,7 -1467,6 +1781,7 @@@ static int acm_suspend(struct usb_inter
        struct acm *acm = usb_get_intfdata(intf);
        int cnt;
  
 +      dbg_mtk(&acm->control->dev, "%s intf=%d", __func__, intf->cur_altsetting->desc.bInterfaceNumber);
        spin_lock_irq(&acm->read_lock);
        spin_lock(&acm->write_lock);
        if (PMSG_IS_AUTO(message)) {
                        spin_unlock_irq(&acm->read_lock);
                        return -EBUSY;
                }
 +      } else {
 +              int i;
 +
 +              for (i = 0; i < ACM_NW; i++) {
 +                      if (acm->wb[i].use) {
 +                              spin_unlock(&acm->write_lock);
 +                              spin_unlock_irq(&acm->read_lock);
 +                              return -EBUSY;
 +                      }
 +              }
        }
 +
        cnt = acm->susp_count++;
        spin_unlock(&acm->write_lock);
        spin_unlock_irq(&acm->read_lock);
@@@ -1816,8 -1494,6 +1819,8 @@@ static int acm_resume(struct usb_interf
        struct urb *urb;
        int rv = 0;
  
 +      dbg_mtk(&acm->control->dev, "%s intf=%d", __func__, intf->cur_altsetting->desc.bInterfaceNumber);
 +
        spin_lock_irq(&acm->read_lock);
        spin_lock(&acm->write_lock);
  
                 * delayed error checking because we must
                 * do the write path at all cost
                 */
 -              if (rv < 0)
 +              if (rv < 0) {
 +                      MYDBG("urb fail:%d\n", rv);
                        goto out;
 +              }
  
                rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
        }
@@@ -2135,9 -1809,6 +2138,9 @@@ static const struct tty_operations acm_
  static int __init acm_init(void)
  {
        int retval;
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      int i;
 +#endif
        acm_tty_driver = alloc_tty_driver(ACM_TTY_MINORS);
        if (!acm_tty_driver)
                return -ENOMEM;
        acm_tty_driver->init_termios = tty_std_termios;
        acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
                                                                HUPCL | CLOCAL;
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +      /*
 +       * wx: disable echo and the other termios flags from the very beginning.
 +       * Otherwise RILD only disables them via tcsetattr() after it has opened
 +       * the tty port, leaving a window between the open and the tcsetattr()
 +       * call; if the modem sends data in that window, things go wrong.
 +       */
 +      acm_tty_driver->init_termios.c_iflag = 0;
 +      acm_tty_driver->init_termios.c_oflag = 0;
 +      acm_tty_driver->init_termios.c_lflag = 0;
 +      for (i = 0; i < CB_NUM; i++)
 +              callback_check_timeout[i] = jiffies;
 +#endif
        tty_set_operations(acm_tty_driver, &acm_ops);
  
        retval = tty_register_driver(acm_tty_driver);
@@@ -2193,10 -1851,6 +2196,10 @@@ static void __exit acm_exit(void
  
  module_init(acm_init);
  module_exit(acm_exit);
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +module_param(enable_debug, int, 0644);
 +module_param(enable_dump, int, 0644);
 +#endif
  
  MODULE_AUTHOR(DRIVER_AUTHOR);
  MODULE_DESCRIPTION(DRIVER_DESC);
diff --combined drivers/usb/core/hub.c
index c09b87b489f536ba7ab6a811c981a65c32970a2d,55a8e84469ec90b9126fee1756c3355cda74ce05..37bb26da356ee849379fb0929f44043f4bbd4620
  
  #include <asm/uaccess.h>
  #include <asm/byteorder.h>
 -
  #include "hub.h"
  
 +
 +#ifdef CONFIG_MTK_ICUSB_SUPPORT
 +int is_musbfsh_rh(struct usb_device *udev);
 +void set_icusb_sts_disconnect_done(void);
 +#endif
 +
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +#include <linux/proc_fs.h>
 +#include <linux/uaccess.h>
 +static struct usb_device *g_dsda_dev = NULL;
 +
 +#ifdef        CONFIG_PM_RUNTIME
 +struct usb_hub *usb11_hub = NULL;
 +int is_musbfsh_rh(struct usb_device *udev);
 +
 +struct usb_device *get_usb11_child_udev(void)
 +{
 +      if (usb11_hub) {
 +              MYDBG("\n");
 +              return usb11_hub->ports[0]->child;
 +      } else {
 +              MYDBG("\n");
 +              return NULL;
 +      }
 +}
 +#endif
 +
 +void dump_data(char *buf, int len)
 +{
 +      int i;
 +
 +      for (i = 0; i < len; i++)
 +              MYDBG("data[%d]: %x\n", i, buf[i]);
 +}
 +
 +void test_dsda_device_ep0(void)
 +{
 +      int ret;
 +      char data_buf[256];
 +
 +      ret = usb_control_msg(g_dsda_dev, usb_rcvctrlpipe(g_dsda_dev, 0),
 +                      USB_REQ_GET_DESCRIPTOR,
 +                      USB_DIR_IN,
 +                      USB_DT_DEVICE << 8,
 +                      0,
 +                      data_buf,
 +                      64,
 +                      USB_CTRL_GET_TIMEOUT);
 +
 +      if (ret < 0) {
 +              MYDBG("test ep0 fail, ret : %d\n", ret);
 +      } else {
 +              MYDBG("test ep0 ok, ret : %d\n", ret);
 +              dump_data(data_buf, ret);
 +      }
 +
 +}
 +
 +void release_usb11_wakelock(void);
 +static ssize_t dsda_tmp_proc_entry(struct file *file_ptr, const char __user *user_buffer, size_t count, loff_t *position)
 +{
 +      char cmd[64];
 +
 +      /* bound the copy so an oversized write cannot overrun the stack buffer */
 +      if (count == 0 || count > sizeof(cmd))
 +              return -EINVAL;
 +
 +      if (copy_from_user(cmd, user_buffer, count))
 +              return -EFAULT;
 +
 +      /* apply action here */
 +      if (cmd[0] == '0') {
 +              MYDBG("");
 +              test_dsda_device_ep0();
 +      }
 +      if (cmd[0] == '1') {
 +              MYDBG("");
 +              release_usb11_wakelock();
 +      }
 +
 +      MYDBG("");
 +
 +      return count;
 +}
 +
 +struct file_operations dsda_tmp_proc_fops = {
 +      .write = dsda_tmp_proc_entry
 +};
 +
 +
 +void create_dsda_tmp_entry(void)
 +{
 +      struct proc_dir_entry *prEntry;
 +
 +      MYDBG("");
 +
 +      prEntry = proc_create("DSDA_TMP_ENTRY", 0660, NULL, &dsda_tmp_proc_fops);
 +      if (prEntry)
 +              MYDBG("add /proc/DSDA_TMP_ENTRY ok\n");
 +      else
 +              MYDBG("add /proc/DSDA_TMP_ENTRY fail\n");
 +}
 +#endif
 +
  /* if we are in debug mode, always announce new devices */
  #ifdef DEBUG
  #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
  #define USB_VENDOR_GENESYS_LOGIC              0x05e3
  #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND      0x01
  
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +extern int usbif_u3h_send_event(char* event) ;
 +#include "otg_whitelist.h"
 +#endif
 +
 +
  static inline int hub_is_superspeed(struct usb_device *hdev)
  {
        return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
@@@ -236,10 -116,6 +236,10 @@@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rw
  static void hub_release(struct kref *kref);
  static int usb_reset_and_verify_device(struct usb_device *udev);
  
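 +/* control pipes to device address 0, used before SET_ADDRESS completes */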
 +#define usb_sndaddr0pipe()    (PIPE_CONTROL << 30)
 +#define usb_rcvaddr0pipe()    ((PIPE_CONTROL << 30) | USB_DIR_IN)
 +
 +
  static inline char *portspeed(struct usb_hub *hub, int portstatus)
  {
        if (hub_is_superspeed(hub->hdev))
@@@ -535,7 -411,6 +535,7 @@@ int usb_clear_port_feature(struct usb_d
   */
  static int set_port_feature(struct usb_device *hdev, int port1, int feature)
  {
 +      MYDBG("");
        return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
                USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
                NULL, 0, 1000);
@@@ -983,7 -858,7 +983,7 @@@ static int hub_hub_status(struct usb_hu
                                "%s failed (err = %d)\n", __func__, ret);
        } else {
                *status = le16_to_cpu(hub->status->hub.wHubStatus);
 -              *change = le16_to_cpu(hub->status->hub.wHubChange); 
 +              *change = le16_to_cpu(hub->status->hub.wHubChange);
                ret = 0;
        }
        mutex_unlock(&hub->status_mutex);
@@@ -1652,7 -1527,7 +1652,7 @@@ static int hub_configure(struct usb_hu
                        hub->mA_per_port = hdev->bus_mA;
                        hub->limited_power = 1;
                }
 -      } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
 +      } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { /* bus-powered */
                int remaining = hdev->bus_mA -
                        hub->descriptor->bHubContrCurrent;
  
                        hub->descriptor->bHubContrCurrent);
                hub->limited_power = 1;
  
 -              if (remaining < hdev->maxchild * unit_load)
 +              if (remaining < hdev->maxchild * unit_load) {
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +                      usbif_u3h_send_event("DEV_OVER_CURRENT");
 +#endif
                        dev_warn(hub_dev,
                                        "insufficient power available "
                                        "to use all downstream ports\n");
 +              }
                hub->mA_per_port = unit_load;   /* 7.2.1 */
  
        } else {        /* Self-powered external hub */
@@@ -1815,7 -1686,6 +1815,7 @@@ static int hub_probe(struct usb_interfa
        struct usb_device *hdev;
        struct usb_hub *hub;
  
 +
        desc = intf->cur_altsetting;
        hdev = interface_to_usbdev(intf);
  
        if (hdev->level == MAX_TOPO_LEVEL) {
                dev_err(&intf->dev,
                        "Unsupported bus topology: hub nested too deep\n");
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +              usbif_u3h_send_event("MAX_HUB_TIER_EXCEED");
 +#endif
                return -E2BIG;
        }
  
  #ifdef        CONFIG_USB_OTG_BLACKLIST_HUB
        if (hdev->parent) {
                dev_warn(&intf->dev, "ignoring external hub\n");
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +              usbif_u3h_send_event("HUB_NOT_SUPPORTED");
 +#endif
                return -ENODEV;
        }
  #endif
@@@ -2229,18 -2093,6 +2229,18 @@@ void usb_disconnect(struct usb_device *
        struct usb_device       *udev = *pdev;
        struct usb_hub          *hub = usb_hub_to_struct_hub(udev);
        int                     i;
 +      struct timeval tv_begin, tv_end;
 +      struct timeval tv_before, tv_after;
 +#ifdef CONFIG_MTK_ICUSB_SUPPORT
 +      int is_icusb_rh;
 +#endif
 +
 +      do_gettimeofday(&tv_begin);
 +
 +#ifdef CONFIG_MTK_ICUSB_SUPPORT
 +      is_icusb_rh = is_musbfsh_rh(udev->parent);
 +#endif
 +
  
        /* mark the device as inactive, so any further urb submissions for
         * this device (and any of its children) will fail immediately.
         * so that the hardware is now fully quiesced.
         */
        dev_dbg (&udev->dev, "unregistering device\n");
 +
 +      do_gettimeofday(&tv_before);
        usb_disable_device(udev, 0);
 +      do_gettimeofday(&tv_after);
 +      MYDBG("usb_disable_device(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
 +
        usb_hcd_synchronize_unlinks(udev);
  
        if (udev->parent) {
                        port_dev->did_runtime_put = false;
        }
  
 +      do_gettimeofday(&tv_before);
        usb_remove_ep_devs(&udev->ep0);
 +      do_gettimeofday(&tv_after);
 +      MYDBG("usb_remove_ep_devs(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
 +
        usb_unlock_device(udev);
  
        /* Unregister the device.  The device driver is responsible
         * for de-configuring the device and invoking the remove-device
         * notifier chain (used by usbfs and possibly others).
         */
 +      do_gettimeofday(&tv_before);
        device_del(&udev->dev);
 +      do_gettimeofday(&tv_after);
 +      MYDBG("device_del(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
  
        /* Free the device number and delete the parent's children[]
         * (or root_hub) pointer.
        hub_free_dev(udev);
  
        put_device(&udev->dev);
 +
 +#ifdef CONFIG_MTK_ICUSB_SUPPORT
 +      if (is_icusb_rh) {
 +              set_icusb_sts_disconnect_done();
 +              MYDBG("ICUSB Disconnect\n");
 +      }
 +#endif
 +      do_gettimeofday(&tv_end);
 +      MYDBG("time spent, sec : %d, usec : %d\n", (unsigned int)(tv_end.tv_sec - tv_begin.tv_sec), (unsigned int)(tv_end.tv_usec - tv_begin.tv_usec));
  }
  
  #ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
@@@ -2464,16 -2294,6 +2464,16 @@@ static int usb_enumerate_device(struct 
        udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
  
        err = usb_enumerate_device_otg(udev);
 +
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +      if (udev->parent) { /* no need to check ourselves, the root hub */
 +              if (!is_targeted(udev)) {
 +                      usbif_u3h_send_event("DEV_NOT_SUPPORTED");
 +                      err = -ENOTSUPP;
 +              }
 +      }
 +#endif
 +
        if (err < 0)
                return err;
  
@@@ -2545,16 -2365,6 +2545,16 @@@ int usb_new_device(struct usb_device *u
                 * sysfs power/wakeup controls wakeup enabled/disabled
                 */
                device_init_wakeup(&udev->dev, 0);
 +              MYDBG("udev :%p\n", udev);
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +#ifdef        CONFIG_PM_RUNTIME
 +              if (is_musbfsh_rh(udev->parent)) {
 +                      MYDBG("\n");
 +                      /* find the struct usb_hub and hook it up */
 +                      usb11_hub = usb_hub_to_struct_hub(udev->parent);
 +              }
 +#endif
 +#endif
        }
  
        /* Tell the runtime-PM framework the device is active */
@@@ -2765,14 -2575,19 +2765,21 @@@ static int hub_port_wait_reset(struct u
                msleep(delay);
  
                /* read and decode port status */
 +              MYDBG("");
                ret = hub_port_status(hub, port1, &portstatus, &portchange);
 +              MYDBG("");
                if (ret < 0)
                        return ret;
  
-               /* The port state is unknown until the reset completes. */
-               if (!(portstatus & USB_PORT_STAT_RESET))
+               /*
+                * The port state is unknown until the reset completes.
+                *
+                * On top of that, some chips may require additional time
+                * to re-establish a connection after the reset is complete,
+                * so also wait for the connection to be re-established.
+                */
+               if (!(portstatus & USB_PORT_STAT_RESET) &&
+                   (portstatus & USB_PORT_STAT_CONNECTION))
                        break;
  
                /* switch to the long delay after two short delay failures */
@@@ -2892,38 -2707,27 +2899,38 @@@ static int hub_port_reset(struct usb_hu
  
        /* Reset the port */
        for (i = 0; i < PORT_RESET_TRIES; i++) {
 +              MYDBG("");
                status = set_port_feature(hub->hdev, port1, (warm ?
                                        USB_PORT_FEAT_BH_PORT_RESET :
                                        USB_PORT_FEAT_RESET));
 +              MYDBG("");
                if (status == -ENODEV) {
 +                      MYDBG("");
                        ;       /* The hub is gone */
                } else if (status) {
 +                      MYDBG("");
                        dev_err(hub->intfdev,
                                        "cannot %sreset port %d (err = %d)\n",
                                        warm ? "warm " : "", port1, status);
                } else {
 +                      MYDBG("");
                        status = hub_port_wait_reset(hub, port1, udev, delay,
                                                                warm);
 -                      if (status && status != -ENOTCONN && status != -ENODEV)
 +                      if (status && status != -ENOTCONN) {
 +                              MYDBG("");
                                dev_dbg(hub->intfdev,
                                                "port_wait_reset: err = %d\n",
                                                status);
 +                      }
                }
  
 +              MYDBG("");
                /* Check for disconnect or reset */
                if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
 +                      MYDBG("");
                        hub_port_finish_reset(hub, port1, udev, &status);
 +                      MYDBG("");
  
                        if (!hub_is_superspeed(hub->hdev))
                                goto done;
                                warm = true;
                        }
                }
 +              MYDBG("");
  
                dev_dbg (hub->intfdev,
                        "port %d not enabled, trying %sreset again...\n",
                        port1, warm ? "warm " : "");
                delay = HUB_LONG_RESET_TIME;
        }
 +      MYDBG("");
 +
  
        dev_err (hub->intfdev,
                "Cannot enable port %i.  Maybe the USB cable is bad?\n",
  
  done:
        if (!hub_is_superspeed(hub->hdev))
 +      {
 +              MYDBG("");
                up_read(&ehci_cf_port_reset_rwsem);
 +      }
 +
 +      MYDBG("");
  
        return status;
  }
@@@ -3214,10 -3010,7 +3221,10 @@@ int usb_port_suspend(struct usb_device 
                                        status);
                        /* bail if autosuspend is requested */
                        if (PMSG_IS_AUTO(msg))
 +                      {
 +                              MYDBG("");
                                goto err_wakeup;
 +                      }
                }
        }
  
        if (usb_disable_ltm(udev)) {
                dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
                status = -ENOMEM;
 +              MYDBG("");
                if (PMSG_IS_AUTO(msg))
                        goto err_ltm;
        }
        if (usb_unlocked_disable_lpm(udev)) {
                dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
                status = -ENOMEM;
 +              MYDBG("");
                if (PMSG_IS_AUTO(msg))
                        goto err_lpm3;
        }
  
        /* see 7.1.7.6 */
        if (hub_is_superspeed(hub->hdev))
 +      {
 +              MYDBG("");
                status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
  
 +#if 0 /* behavior for kernel 3.10 */
        /*
         * For system suspend, we do not need to enable the suspend feature
         * on individual USB-2 ports.  The devices will automatically go
         * Therefore we will turn on the suspend feature if udev or any of its
         * descendants is enabled for remote wakeup.
         */
 -      else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
 +      } else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) {
 +              MYDBG("");
                status = set_port_feature(hub->hdev, port1,
                                USB_PORT_FEAT_SUSPEND);
 -      else {
 +      else {
                really_suspend = false;
                status = 0;
        }
 +#else /* roll back to the kernel 3.4 behavior */
 +      } else {
 +              MYDBG("");
 +              status = set_port_feature(hub->hdev, port1,
 +                              USB_PORT_FEAT_SUSPEND);
 +      }
 +#endif
 +
        if (status) {
                dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
                                port1, status);
 +              MYDBG("");
  
                /* Try to enable USB3 LPM and LTM again */
                usb_unlocked_enable_lpm(udev);
@@@ -3373,9 -3151,7 +3380,9 @@@ static int finish_port_resume(struct us
         */
        if (status == 0) {
                devstatus = 0;
 +              MYDBG("\n");
                status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
 +              MYDBG("%d\n", status);
                if (status >= 0)
                        status = (status > 0 ? 0 : -ENODEV);
  
@@@ -4166,7 -3942,7 +4173,7 @@@ EXPORT_SYMBOL_GPL(usb_enable_ltm)
   * Between connect detection and reset signaling there must be a delay
   * of 100ms at least for debounce and power-settling.  The corresponding
   * timer shall restart whenever the downstream port detects a disconnect.
 - * 
 + *
   * Apparently there are some bluetooth and irda-dongles and a number of
   * low-speed devices for which this debounce period may last over a second.
   * Not covered by the spec - but easy to deal with.
@@@ -4227,6 -4003,8 +4234,6 @@@ void usb_ep0_reinit(struct usb_device *
  }
  EXPORT_SYMBOL_GPL(usb_ep0_reinit);
  
 -#define usb_sndaddr0pipe()    (PIPE_CONTROL << 30)
 -#define usb_rcvaddr0pipe()    ((PIPE_CONTROL << 30) | USB_DIR_IN)
  
  static int hub_set_address(struct usb_device *udev, int devnum)
  {
@@@ -4281,7 -4059,6 +4288,7 @@@ hub_port_init (struct usb_hub *hub, str
        const char              *speed;
        int                     devnum = udev->devnum;
  
 +      dump_stack();
        /* root hub ports have a slightly longer reset period
         * (from USB 2.0 spec, section 7.1.7.5)
         */
  
        /* Reset the device; full speed may morph to high speed */
        /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
 +      MYDBG("");
        retval = hub_port_reset(hub, port1, udev, delay, false);
 +      MYDBG("");
        if (retval < 0)         /* error or disconnect */
                goto fail;
        /* success, speed is known */
        default:
                goto fail;
        }
 +      MYDBG("");
  
        if (udev->speed == USB_SPEED_WIRELESS)
                speed = "variable speed Wireless";
                udev->tt = &hub->tt;
                udev->ttport = port1;
        }
 - 
 +
        /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
         * Because device hardware and firmware is sometimes buggy in
         * this area, and this is how Linux has done it for ages.
         * value.
         */
        for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
 +              MYDBG("");
                if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
                        struct usb_device_descriptor *buf;
                        int r = 0;
                                 */
                                if (r == 0  || (r == -ETIMEDOUT && j == 0))
                                        break;
 +
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +                              if (buf->bMaxPacketSize0 == 0) {
 +                                      usbif_u3h_send_event("DEV_CONN_TMOUT");
 +                              }
 +#endif
 +
                        }
                        udev->descriptor.bMaxPacketSize0 =
                                        buf->bMaxPacketSize0;
                udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
                usb_ep0_reinit(udev);
        }
 -  
 +
        retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
        if (retval < (signed)sizeof(udev->descriptor)) {
                if (retval != -ENODEV)
@@@ -4644,9 -4410,6 +4651,9 @@@ hub_power_remaining (struct usb_hub *hu
                remaining -= delta;
        }
        if (remaining < 0) {
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +              usbif_u3h_send_event("DEV_OVER_CURRENT");
 +#endif
                dev_warn(hub->intfdev, "%dmA over power budget!\n",
                        - remaining);
                remaining = 0;
@@@ -4674,7 -4437,6 +4681,7 @@@ static void hub_port_connect_change(str
        int status, i;
        unsigned unit_load;
  
 +      MYDBG("");
        dev_dbg (hub_dev,
                "port %d, status %04x, change %04x, %s\n",
                port1, portstatus, portchange, portspeed(hub, portstatus));
                }
  
                /* reset (non-USB 3.0 devices) and get descriptor */
 +              MYDBG("");
                status = hub_port_init(hub, udev, port1, i);
                if (status < 0)
 +              {
 +                      MYDBG("");
                        goto loop;
 +              }
 +              MYDBG("");
  
                if (udev->quirks & USB_QUIRK_DELAY_INIT)
                        msleep(1000);
                                goto loop_disable;
                        }
                }
 - 
 +
                /* check for devices running slower than they could */
                if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
                                && udev->speed == USB_SPEED_FULL
                                hub->ports[port1 - 1]->child = NULL;
                                spin_unlock_irq(&device_state_lock);
                        }
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +                      g_dsda_dev = udev;
 +                      MYDBG("get new device !!!, BUILD TIME : %s, g_dsda_dev : %p\n", __TIME__, g_dsda_dev);
 +#endif
                }
  
                if (status)
@@@ -4906,7 -4659,7 +4913,7 @@@ loop
                        dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
                                        port1);
        }
 - 
 +
  done:
        hub_port_disable(hub, port1, 1);
        if (hcd->driver->relinquish_port && !hub->hdev->parent)
@@@ -5029,7 -4782,6 +5036,7 @@@ static void hub_events(void
                        dev_dbg (hub_dev, "resetting for error %d\n",
                                hub->error);
  
 +                      MYDBG("");
                        ret = usb_reset_device(hdev);
                        if (ret) {
                                dev_dbg (hub_dev,
                                 * EM interference sometimes causes badly
                                 * shielded USB devices to be shutdown by
                                 * the hub, this hack enables them again.
 -                               * Works at least with mouse driver. 
 +                               * Works at least with mouse driver.
                                 */
                                if (!(portstatus & USB_PORT_STAT_ENABLE)
                                    && !connect_change
@@@ -5261,9 -5013,6 +5268,9 @@@ static struct usb_driver hub_driver = 
        .supports_autosuspend = 1,
  };
  
 +#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
 +extern void mtk_hub_event_steal(spinlock_t *lock, struct list_head* list);
 +#endif
  int usb_hub_init(void)
  {
        if (usb_register(&hub_driver) < 0) {
                return -1;
        }
  
 +#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
 +      mtk_hub_event_steal(&hub_event_lock, &hub_event_list);
 +#endif
 +
        khubd_task = kthread_run(hub_thread, NULL, "khubd");
        if (!IS_ERR(khubd_task))
                return 0;
@@@ -5414,7 -5159,6 +5421,7 @@@ static int usb_reset_and_verify_device(
        int                             i, ret = 0;
        int                             port1 = udev->portnum;
  
 +      MYDBG("");
        if (udev->state == USB_STATE_NOTATTACHED ||
                        udev->state == USB_STATE_SUSPENDED) {
                dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
  
        if (ret < 0)
                goto re_enumerate;
 - 
 +
        /* Device might have changed firmware (DFU or similar) */
        if (descriptors_changed(udev, &descriptor)) {
                dev_info(&udev->dev, "device firmware changed\n");
@@@ -5536,7 -5280,7 +5543,7 @@@ done
        usb_unlocked_enable_lpm(udev);
        usb_enable_ltm(udev);
        return 0;
 - 
 +
  re_enumerate:
        /* LPM state doesn't matter when we're about to destroy the device. */
        hub_port_logical_disconnect(parent_hub, port1);
@@@ -5570,7 -5314,6 +5577,7 @@@ int usb_reset_device(struct usb_device 
        unsigned int noio_flag;
        struct usb_host_config *config = udev->actconfig;
  
 +      MYDBG("");
        if (udev->state == USB_STATE_NOTATTACHED ||
                        udev->state == USB_STATE_SUSPENDED) {
                dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
index 3fc750f52639a5885855c2a19f4063391f1eac1f,a9142a46ae829a2f09bbd7e75b44862b178f5aef..3fa5a003a319781174ff001db13dcf0240dd1856
  
  /* #define VERBOSE_DEBUG */
  
 +#ifdef pr_fmt
 +#undef pr_fmt
 +#endif
 +#define pr_fmt(fmt) "["KBUILD_MODNAME"]" fmt
 +
  #include <linux/kallsyms.h>
  #include <linux/kernel.h>
  #include <linux/slab.h>
  #include <linux/usb/composite.h>
  #include <asm/unaligned.h>
  
 +#include <linux/printk.h>
 +
 +
 +
  /*
   * The code in this file is utility code, used to build a gadget driver
   * from one or more "function" drivers, one or more "configuration"
@@@ -134,11 -125,16 +134,16 @@@ int config_ep_by_speed(struct usb_gadge
  
  ep_found:
        /* commit results */
-       _ep->maxpacket = usb_endpoint_maxp(chosen_desc);
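+       /* bits 10:0 of wMaxPacketSize carry the packet size itself */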
+       _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
        _ep->desc = chosen_desc;
        _ep->comp_desc = NULL;
        _ep->maxburst = 0;
-       _ep->mult = 0;
+       _ep->mult = 1;
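+       /*
+        * For high-speed periodic endpoints, bits 12:11 of wMaxPacketSize
+        * hold the number of additional transactions per microframe, so the
+        * total per-microframe transaction count is that field plus one.
+        */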
+       if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
+                               usb_endpoint_xfer_int(_ep->desc)))
+               _ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1;
        if (!want_comp_desc)
                return 0;
  
                switch (usb_endpoint_type(_ep->desc)) {
                case USB_ENDPOINT_XFER_ISOC:
                        /* mult: bits 1:0 of bmAttributes */
-                       _ep->mult = comp_desc->bmAttributes & 0x3;
+                       _ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
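+                       /* fall through: isoc endpoints pick up bMaxBurst too */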
                case USB_ENDPOINT_XFER_BULK:
                case USB_ENDPOINT_XFER_INT:
                        _ep->maxburst = comp_desc->bMaxBurst + 1;
@@@ -189,10 -185,8 +194,10 @@@ int usb_add_function(struct usb_configu
                struct usb_function *function)
  {
        int     value = -EINVAL;
 +
 +      pr_debug("[XLOG_DEBUG][USB][COM]%s\n", __func__);
  
 -      DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
 +      INFO(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
                        function->name, function,
                        config->label, config);
  
  
  done:
        if (value)
 -              DBG(config->cdev, "adding '%s'/%p --> %d\n",
 +              INFO(config->cdev, "adding '%s'/%p --> %d\n",
                                function->name, function, value);
        return value;
  }
@@@ -234,17 -228,13 +239,17 @@@ EXPORT_SYMBOL_GPL(usb_add_function)
  
  void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
  {
 -      if (f->disable)
 +      if (f->disable) {
 +              INFO(c->cdev, "disable function '%s'/%p\n", f->name, f);
                f->disable(f);
 +      }
  
        bitmap_zero(f->endpoints, 32);
        list_del(&f->list);
 -      if (f->unbind)
 +      if (f->unbind) {
 +              INFO(c->cdev, "unbind function '%s'/%p\n", f->name, f);
                f->unbind(c, f);
 +      }
  }
  EXPORT_SYMBOL_GPL(usb_remove_function);
  
@@@ -543,11 -533,7 +548,11 @@@ static int bos_desc(struct usb_composit
        usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
        usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
        usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
 -      usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
 +#ifdef CONFIG_USBIF_COMPLIANCE
 +      usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
 +#else
 +      usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
 +#endif
  
        /*
         * The Superspeed USB Capability descriptor shall be implemented by all
@@@ -606,7 -592,6 +611,7 @@@ static void reset_config(struct usb_com
        DBG(cdev, "reset config\n");
  
        list_for_each_entry(f, &cdev->config->functions, list) {
 +              INFO(cdev, "disable function '%s'/%p\n", f->name, f);
                if (f->disable)
                        f->disable(f);
  
@@@ -788,19 -773,18 +793,19 @@@ int usb_add_config(struct usb_composite
                                        struct usb_function, list);
                        list_del(&f->list);
                        if (f->unbind) {
 -                              DBG(cdev, "unbind function '%s'/%p\n",
 +                              INFO(cdev, "unbind function '%s'/%p\n",
                                        f->name, f);
                                f->unbind(config, f);
                                /* may free memory for "f" */
                        }
                }
                list_del(&config->list);
 +              pr_debug("[XLOG_DEBUG][USB][COM]bind failed; removing the only entry leaves the list in its initial state");
                config->cdev = NULL;
        } else {
                unsigned        i;
  
 -              DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
 +              INFO(cdev, "cfg %d/%p speeds:%s%s%s\n",
                        config->bConfigurationValue, config,
                        config->superspeed ? " super" : "",
                        config->highspeed ? " high" : "",
@@@ -833,7 -817,7 +838,7 @@@ done
  }
  EXPORT_SYMBOL_GPL(usb_add_config);
  
 -static void remove_config(struct usb_composite_dev *cdev,
 +static void unbind_config(struct usb_composite_dev *cdev,
                              struct usb_configuration *config)
  {
        while (!list_empty(&config->functions)) {
                                struct usb_function, list);
                list_del(&f->list);
                if (f->unbind) {
 -                      DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
 +                      INFO(cdev, "unbind function '%s'/%p\n", f->name, f);
                        f->unbind(config, f);
                        /* may free memory for "f" */
                }
        }
 -      list_del(&config->list);
        if (config->unbind) {
 -              DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
 +              INFO(cdev, "unbind config '%s'/%p\n", config->label, config);
                config->unbind(config);
                        /* may free memory for "c" */
        }
 +
 +      /* reset all driver data to prevent leakage of ep allocation */
 +      usb_ep_autoconfig_reset(cdev->gadget);
  }
  
  /**
@@@ -877,18 -859,9 +882,18 @@@ void usb_remove_config(struct usb_compo
        if (cdev->config == config)
                reset_config(cdev);
  
 +      if (config->cdev != NULL)
 +              list_del(&config->list);
 +      else
 +              DBG(cdev, "%s: config->list has already been deleted\n", __func__);
 +
        spin_unlock_irqrestore(&cdev->lock, flags);
  
 -      remove_config(cdev, config);
 +      unbind_config(cdev, config);
  }
  
  /*-------------------------------------------------------------------------*/
@@@ -1230,7 -1203,7 +1235,7 @@@ EXPORT_SYMBOL_GPL(usb_string_ids_n)
  
  /*-------------------------------------------------------------------------*/
  
 -static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
 +void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
  {
        if (req->status || req->actual != req->length)
                DBG((struct usb_composite_dev *) ep->driver_data,
                                req->status, req->actual, req->length);
  }
  
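 +/*
 + * Exported so code that temporarily overrides cdev->req->complete (see
 + * composite_disconnect()) can restore the default handler.
 + */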
 +EXPORT_SYMBOL_GPL(composite_setup_complete);
 +
  /*
   * The setup() callback implements all the ep0 functionality that's
   * not handled lower down, in hardware or the hardware driver(like
@@@ -1261,9 -1232,6 +1266,9 @@@ composite_setup(struct usb_gadget *gadg
        struct usb_function             *f = NULL;
        u8                              endp;
  
 +      pr_debug("[XLOG_DEBUG][USB][COM]%s bRequest=0x%X\n",
 +              __func__, ctrl->bRequest);
 +
        /* partial re-init of the response message; the function or the
         * gadget might need to intercept e.g. a control-OUT completion
         * when we delegate to it.
                if (ctrl->bRequestType != USB_DIR_IN)
                        goto unknown;
                switch (w_value >> 8) {
 -
 +#ifdef CONFIG_USBIF_COMPLIANCE
 +              case USB_DT_OTG:
 +              {
 +                      struct usb_otg_descriptor *otg_desc = req->buf;
 +
 +                      otg_desc->bLength = sizeof(*otg_desc);
 +                      otg_desc->bDescriptorType = USB_DT_OTG;
 +                      otg_desc->bmAttributes = USB_OTG_SRP | USB_OTG_HNP;
 +                      otg_desc->bcdOTG = cpu_to_le16(0x0200);
 +                      value = min_t(int, w_length, sizeof(struct usb_otg_descriptor));
 +                      break;
 +              }
 +#endif
                case USB_DT_DEVICE:
                        cdev->desc.bNumConfigurations =
                                count_configs(cdev, USB_DT_DEVICE);
  
                        value = min(w_length, (u16) sizeof cdev->desc);
                        memcpy(req->buf, &cdev->desc, value);
 +                      pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
 +                                                      "USB_DT_DEVICE, value=%d\n",value);
                        break;
                case USB_DT_DEVICE_QUALIFIER:
                        if (!gadget_is_dualspeed(gadget) ||
                        device_qual(cdev);
                        value = min_t(int, w_length,
                                sizeof(struct usb_qualifier_descriptor));
 +                      pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
 +                                                      "USB_DT_DEVICE_QUALIFIER, value=%d\n",value);
                        break;
                case USB_DT_OTHER_SPEED_CONFIG:
 +                      pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
 +                                                      "USB_DT_OTHER_SPEED_CONFIG\n");
                        if (!gadget_is_dualspeed(gadget) ||
                            gadget->speed >= USB_SPEED_SUPER)
                                break;
                        value = config_desc(cdev, w_value);
                        if (value >= 0)
                                value = min(w_length, (u16) value);
 +                      pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
 +                                                      "USB_DT_CONFIG, value=%d\n",value);
                        break;
                case USB_DT_STRING:
                        value = get_string(cdev, req->buf,
                                        w_index, w_value & 0xff);
 -                      if (value >= 0)
 +                      if (value >= 0) {
                                value = min(w_length, (u16) value);
 +                              pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
 +                                              "USB_DT_STRING, value=%d\n" ,value);
 +                      }
                        break;
                case USB_DT_BOS:
                        if (gadget_is_superspeed(gadget)) {
                                value = bos_desc(cdev);
                                value = min(w_length, (u16) value);
                        }
 +                      pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
 +                                              "USB_DT_BOS, value=%d\n",value);
 +                      break;
 +              default:
 +                      pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR w_value=0x%X\n", w_value);
                        break;
                }
                break;
                spin_lock(&cdev->lock);
                value = set_config(cdev, ctrl, w_value);
                spin_unlock(&cdev->lock);
 +              pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_SET_CONFIGURATION: "
 +                                              "value=%d\n",value);
                break;
        case USB_REQ_GET_CONFIGURATION:
                if (ctrl->bRequestType != USB_DIR_IN)
                        goto unknown;
                if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
                        break;
 -              f = cdev->config->interface[intf];
 +
 +              if (cdev->config)
 +                      f = cdev->config->interface[intf];
 +              else
 +                      pr_debug("%s: cdev->config = NULL\n", __func__);
 +
                if (!f)
                        break;
  
         * interface of the function
         */
        case USB_REQ_GET_STATUS:
 +              pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_STATUS\n");
                if (!gadget_is_superspeed(gadget))
                        goto unknown;
                if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
         */
        case USB_REQ_CLEAR_FEATURE:
        case USB_REQ_SET_FEATURE:
 +              pr_debug("[XLOG_DEBUG][USB][COM]%s w_value=%d\n",
 +                      (ctrl->bRequest == USB_REQ_SET_FEATURE) ? "USB_REQ_SET_FEATURE" : "USB_REQ_CLEAR_FEATURE", w_value);
                if (!gadget_is_superspeed(gadget))
                        goto unknown;
                if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
                        goto unknown;
                switch (w_value) {
                case USB_INTRF_FUNC_SUSPEND:
 +                      pr_debug("[COM]USB_INTRF_FUNC_SUSPEND\n");
                        if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
                                break;
                        f = cdev->config->interface[intf];
                        break;
                }
                break;
 +      case USB_REQ_SET_SEL:
 +              pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_SET_SEL Pretend success\n");
 +              value = 0;
 +              break;
        default:
  unknown:
                VDBG(cdev,
        }
  
  done:
 +      if (value < 0) {
 +              pr_debug("[XLOG_DEBUG][USB][COM]composite_setup: value=%d, "
 +                              "bRequestType=0x%x, bRequest=0x%x, w_value=0x%x, w_length=0x%x\n", value,
 +                              ctrl->bRequestType, ctrl->bRequest, w_value, w_length);
 +      }
        /* device either stalls (value < 0) or reports success */
        return value;
  }
@@@ -1581,14 -1502,6 +1586,14 @@@ void composite_disconnect(struct usb_ga
                reset_config(cdev);
        if (cdev->driver->disconnect)
                cdev->driver->disconnect(cdev);
 +
 +      /* ALPS00235316 and ALPS00234976 */
 +      /* reset the completion callback */
 +      if (cdev->req->complete) {
 +              pr_debug("[XLOG_DEBUG][USB][COM]%s: reassigning the completion callback\n", __func__);
 +              cdev->req->complete = composite_setup_complete;
 +      }
 +
        spin_unlock_irqrestore(&cdev->lock, flags);
  }
  
@@@ -1621,8 -1534,7 +1626,8 @@@ static void __composite_unbind(struct u
                struct usb_configuration        *c;
                c = list_first_entry(&cdev->configs,
                                struct usb_configuration, list);
 -              remove_config(cdev, c);
 +              list_del(&c->list);
 +              unbind_config(cdev, c);
        }
        if (cdev->driver->unbind && unbind_driver)
                cdev->driver->unbind(cdev);
@@@ -1876,8 -1788,6 +1881,8 @@@ int usb_composite_probe(struct usb_comp
        if (!driver || !driver->dev || !driver->bind)
                return -EINVAL;
  
 +      pr_debug("[XLOG_DEBUG][USB][COM]%s: driver->name = %s", __func__, driver->name);
 +
        if (!driver->name)
                driver->name = "composite";
  
index 59f492a29a0b5f691cadd66352ba04ef238c0702,ff30171b69264100b648e6f481dfbfde1b55d9d0..bb2b3e2d71a082c93ce42b354ebb2abc336b034c
   */
  
  /* #define VERBOSE_DEBUG */
 +#ifdef pr_fmt
 +#undef pr_fmt
 +#endif
 +#define pr_fmt(fmt) "["KBUILD_MODNAME"]" fmt
  
  #include <linux/slab.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/device.h>
  #include <linux/err.h>
 -
 +#include <linux/printk.h>
  #include "u_serial.h"
  #include "gadget_chips.h"
  
  
 +#define ACM_LOG "USB_ACM"
 +
  /*
   * This CDC ACM function support just wraps control functions and
   * notifications around the generic serial-over-usb code.
@@@ -340,10 -334,6 +340,10 @@@ static void acm_complete_set_line_codin
                 * nothing unless we control a real RS232 line.
                 */
                acm->port_line_coding = *value;
 +
 +              pr_debug("[XLOG_INFO][USB_ACM] %s: rate=%d, stop=%d, parity=%d, data=%d\n", __func__, \
 +                      acm->port_line_coding.dwDTERate, acm->port_line_coding.bCharFormat, \
 +                      acm->port_line_coding.bParityType, acm->port_line_coding.bDataBits);
        }
  }
  
@@@ -365,10 -355,6 +365,10 @@@ static int acm_setup(struct usb_functio
         * to them by stalling.  Options include get/set/clear comm features
         * (not that useful) and SEND_BREAK.
         */
 +
 +      pr_debug("[XLOG_INFO][USB_ACM]%s: ttyGS%d req%02x.%02x v%04x i%04x len=%d\n", __func__, \
 +              acm->port_num, ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length);
 +
        switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
  
        /* SET_LINE_CODING ... just read and save what the host sends */
                value = min_t(unsigned, w_length,
                                sizeof(struct usb_cdc_line_coding));
                memcpy(req->buf, &acm->port_line_coding, value);
 +
 +              pr_debug("[XLOG_INFO][USB_ACM]%s: rate=%d,stop=%d,parity=%d,data=%d\n", __func__, \
 +                      acm->port_line_coding.dwDTERate, acm->port_line_coding.bCharFormat, \
 +                      acm->port_line_coding.bParityType, acm->port_line_coding.bDataBits);
 +
                break;
  
        /* SET_CONTROL_LINE_STATE ... save what the host sent */
@@@ -487,7 -468,7 +487,7 @@@ static void acm_disable(struct usb_func
        struct f_acm    *acm = func_to_acm(f);
        struct usb_composite_dev *cdev = f->config->cdev;
  
 -      DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
 +      INFO(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
        gserial_disconnect(&acm->port);
        usb_ep_disable(acm->notify);
        acm->notify->driver_data = NULL;
@@@ -554,13 -535,15 +554,15 @@@ static int acm_notify_serial_state(stru
  {
        struct usb_composite_dev *cdev = acm->port.func.config->cdev;
        int                     status;
+       __le16                  serial_state;
  
        spin_lock(&acm->lock);
        if (acm->notify_req) {
                DBG(cdev, "acm ttyGS%d serial state %04x\n",
                                acm->port_num, acm->serial_state);
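+               /* the serial-state notification goes on the wire little-endian */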
+               serial_state = cpu_to_le16(acm->serial_state);
                status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
-                               0, &acm->serial_state, sizeof(acm->serial_state));
+                               0, &serial_state, sizeof(acm->serial_state));
        } else {
                acm->pending = true;
                status = 0;
@@@ -711,12 -694,6 +713,12 @@@ acm_bind(struct usb_configuration *c, s
        if (status)
                goto fail;
  
 +      pr_debug("[XLOG_INFO][USB_ACM]%s: ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", \
 +                      __func__, acm->port_num, \
 +                      gadget_is_superspeed(c->cdev->gadget) ? "super" : \
 +                      gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", \
 +                      acm->port.in->name, acm->port.out->name, acm->notify->name);
 +
        DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
                        acm->port_num,
                        gadget_is_superspeed(c->cdev->gadget) ? "super" :
index bd25d717146d552b06925f05011ac591460c131c,f4348a9ff3855b722944107fbb22a74a42ddc82e..f55b5a18c4908cffe14fd351170cac81417df640
@@@ -26,8 -26,6 +26,8 @@@
  #include <linux/dmapool.h>
  
  #include "xhci.h"
 +#include <mach/mt_boot.h>
 +#include <linux/dma-mapping.h>
  
  /*
   * Allocates a generic ring segment from the ring pool, sets the dma address,
@@@ -113,12 -111,10 +113,12 @@@ static void xhci_link_segments(struct x
                val |= TRB_TYPE(TRB_LINK);
                /* Always set the chain bit with 0.95 hardware */
                /* Set chain bit for isoc rings on AMD 0.96 host */
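 +              /* the MTK xHCI host does not need the link-TRB chain-bit quirks */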
 +#ifndef CONFIG_MTK_XHCI
                if (xhci_link_trb_quirk(xhci) ||
                                (type == TYPE_ISOC &&
                                 (xhci->quirks & XHCI_AMD_0x96_HOST)))
                        val |= TRB_CHAIN;
 +#endif
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
        }
  }
@@@ -427,16 -423,12 +427,16 @@@ static void xhci_free_stream_ctx(struc
                unsigned int num_stream_ctxs,
                struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
  {
 -      struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 +      struct device *dev = xhci_to_hcd(xhci)->self.controller;
  
        if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
 -              dma_free_coherent(&pdev->dev,
 -                              sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 -                              stream_ctx, dma);
 +              dma_free_coherent(dev,
 +                      sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 +                      stream_ctx, dma);
        else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
                return dma_pool_free(xhci->small_streams_pool,
                                stream_ctx, dma);
@@@ -459,12 -451,12 +459,12 @@@ static struct xhci_stream_ctx *xhci_all
                unsigned int num_stream_ctxs, dma_addr_t *dma,
                gfp_t mem_flags)
  {
 -      struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 +      struct device *dev = xhci_to_hcd(xhci)->self.controller;
  
        if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
 -              return dma_alloc_coherent(&pdev->dev,
 -                              sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 -                              dma, mem_flags);
 +              return dma_alloc_coherent(dev,
 +                      sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 +                      dma, mem_flags);
        else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
                return dma_pool_alloc(xhci->small_streams_pool,
                                mem_flags, dma);
@@@ -933,6 -925,40 +933,40 @@@ void xhci_free_virt_device(struct xhci_
        xhci->devs[slot_id] = NULL;
  }
  
+ /*
+  * Free a virt_device structure.
+  * If the virt_device added a tt_info (a hub) and has children pointing to
+  * that tt_info, then free the child first. Recursive.
+  * We can't rely on udev at this point to find child-parent relationships.
+  */
+ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+ {
+       struct xhci_virt_device *vdev;
+       struct list_head *tt_list_head;
+       struct xhci_tt_bw_info *tt_info, *next;
+       int i;
+       vdev = xhci->devs[slot_id];
+       if (!vdev)
+               return;
+       tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+       list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+               /* is this a hub device that added a tt_info to the tts list */
+               if (tt_info->slot_id == slot_id) {
+                       /* are any devices using this tt_info? */
+                       for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+                               vdev = xhci->devs[i];
+                               if (vdev && (vdev->tt_info == tt_info))
+                                       xhci_free_virt_devices_depth_first(
+                                               xhci, i);
+                       }
+               }
+       }
+       /* we are now at a leaf device */
+       xhci_free_virt_device(xhci, slot_id);
+ }
+
  int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                struct usb_device *udev, gfp_t flags)
  {
@@@ -1457,18 -1483,7 +1491,18 @@@ int xhci_endpoint_init(struct xhci_hcd 
                break;
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
 +      {
 +              CHIP_SW_VER sw_code = mt_get_chip_sw_ver();
 +              unsigned int hw_code = mt_get_chip_hw_code();
 +                      
 +
 +              if ((hw_code == 0x6595) && (sw_code <= CHIP_SW_VER_01)) {
 +                      /* workaround for the maxp size issue of RXXE */
 +                      if ((max_packet % 4 == 2) && (max_packet % 16 != 14) &&
 +                              (max_burst == 0) && usb_endpoint_dir_in(&ep->desc))
 +              }
                break;
 +      }
        default:
                BUG();
        }
@@@ -1705,7 -1720,7 +1739,7 @@@ static void scratchpad_free(struct xhci
  {
        int num_sp;
        int i;
 -      struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 +      struct device *dev = xhci_to_hcd(xhci)->self.controller;
  
        if (!xhci->scratchpad)
                return;
        num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
  
        for (i = 0; i < num_sp; i++) {
 -              dma_free_coherent(&pdev->dev, xhci->page_size,
 +              dma_free_coherent(dev, xhci->page_size,
                                    xhci->scratchpad->sp_buffers[i],
                                    xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);
        kfree(xhci->scratchpad->sp_buffers);
 -      dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
 +      dma_free_coherent(dev, num_sp * sizeof(u64),
                            xhci->scratchpad->sp_array,
                            xhci->scratchpad->sp_dma);
        kfree(xhci->scratchpad);
@@@ -1781,7 -1796,7 +1815,7 @@@ void xhci_free_command(struct xhci_hcd 
  
  void xhci_mem_cleanup(struct xhci_hcd *xhci)
  {
 -      struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 +      struct device *dev = xhci_to_hcd(xhci)->self.controller;
        struct dev_info *dev_info, *next;
        struct xhci_cd  *cur_cd, *next_cd;
        unsigned long   flags;
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
 -              dma_free_coherent(&pdev->dev, size,
 +              dma_free_coherent(dev, size,
                                xhci->erst.entries, xhci->erst.erst_dma_addr);
        xhci->erst.entries = NULL;
        xhci_dbg(xhci, "Freed ERST\n");
                }
        }
  
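+       /* free depth-first so children go before their parent hub's TT info */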
-       for (i = 1; i < MAX_HC_SLOTS; ++i)
-               xhci_free_virt_device(xhci, i);
+       for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+               xhci_free_virt_devices_depth_first(xhci, i);
  
        if (xhci->segment_pool)
                dma_pool_destroy(xhci->segment_pool);
        xhci_dbg(xhci, "Freed medium stream array pool\n");
  
        if (xhci->dcbaa)
 -              dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
 +              dma_free_coherent(dev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
        xhci->dcbaa = NULL;
  
index 7d5846e20a5d041c58f11887c80853db914835a0,411db91b8305d97f98020df2982b00dfe1b420c2..6663fee96fecce135777ace06f9e3bd8edf8be5a
  #include <linux/platform_device.h>
  #include <linux/module.h>
  #include <linux/slab.h>
 +#include <linux/dma-mapping.h>
  
  #include "xhci.h"
  
 +#ifdef CONFIG_MTK_XHCI
 +#include <linux/xhci/xhci-mtk.h>
 +#include <linux/of.h>
 +#endif
 +
  static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
  {
        /*
         * dev struct in order to setup MSI
         */
        xhci->quirks |= XHCI_PLAT;
 +      /*
 +       * CC: the MTK host controller gives a spurious successful event after
 +       * a short transfer; ignore it.
 +       */
 +      xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
 +      xhci->quirks |= XHCI_LPM_SUPPORT;
  }
  
  /* called during probe() after chip reset completes */
@@@ -92,19 -82,6 +92,19 @@@ static const struct hc_driver xhci_plat
        .bus_resume =           xhci_bus_resume,
  };
  
 +#if defined(CONFIG_MTK_LM_MODE)
 +#define XHCI_DMA_BIT_MASK DMA_BIT_MASK(64)
 +#else
 +#define XHCI_DMA_BIT_MASK DMA_BIT_MASK(32)
 +#endif
 +
 +static u64 xhci_dma_mask = XHCI_DMA_BIT_MASK;
 +
 +static void xhci_hcd_release(struct device *dev)
 +{
 +      printk(KERN_INFO "dev = 0x%p\n", dev);
 +}
 +
  static int xhci_plat_probe(struct platform_device *pdev)
  {
        const struct hc_driver  *driver;
  
        driver = &xhci_plat_xhci_driver;
  
 +#ifdef CONFIG_MTK_XHCI  /* device tree support */
 +      irq = platform_get_irq_byname(pdev, XHCI_DRIVER_NAME);
 +      printk(KERN_DEBUG "%s: irq %d\n", __func__, irq);
 +      if (irq < 0)
 +              return -ENODEV;
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, XHCI_BASE_REGS_ADDR_RES_NAME);
 +      if (!res)
 +              return -ENODEV;
 +
 +      pdev->dev.coherent_dma_mask = XHCI_DMA_BIT_MASK;
 +      pdev->dev.dma_mask = &xhci_dma_mask;
 +      pdev->dev.release = xhci_hcd_release;
 +#else
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return -ENODEV;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;
 +#endif
  
        hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
        if (!hcd)
                goto release_mem_region;
        }
  
 +      printk(KERN_DEBUG "%s: phys 0x%p, virt 0x%p\n", __func__,
 +              (void *)(unsigned long)res->start, hcd->regs);
 +
 +      #ifdef CONFIG_MTK_XHCI
 +      ret = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_TRIGGER_LOW);
 +      #else
        ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
 +      #endif
 +
        if (ret)
                goto unmap_registers;
  
         */
        *((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
  
 +      #ifdef CONFIG_MTK_XHCI
 +      ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED | IRQF_TRIGGER_LOW);
 +      #else
        ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
 +      #endif
        if (ret)
                goto put_usb3_hcd;
  
@@@ -224,6 -174,8 +224,8 @@@ static int xhci_plat_remove(struct plat
        struct usb_hcd  *hcd = platform_get_drvdata(dev);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
  
+       xhci->xhc_state |= XHCI_STATE_REMOVING;
        usb_remove_hcd(xhci->shared_hcd);
        usb_put_hcd(xhci->shared_hcd);
  
        iounmap(hcd->regs);
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
 +      #ifdef CONFIG_MTK_XHCI  
 +      mtk_xhci_reset(xhci);
 +      #endif
        kfree(xhci);
  
        return 0;
  }
  
 +#ifdef CONFIG_MTK_XHCI
 +static const struct of_device_id mtk_xhci_of_match[] = {
 +      {
 +              .compatible = "mediatek,USB3_XHCI",
 +      },
 +      { },
 +};
 +#endif
 +
  static struct platform_driver usb_xhci_driver = {
        .probe  = xhci_plat_probe,
        .remove = xhci_plat_remove,
        .driver = {
                .name = "xhci-hcd",
 +#ifdef CONFIG_MTK_XHCI
 +              .of_match_table = of_match_ptr(mtk_xhci_of_match),
 +#endif
        },
  };
  MODULE_ALIAS("platform:xhci-hcd");
diff --combined drivers/usb/host/xhci.c
index d99676c82e504795c7c5afb5ab442d269f80c0c8,0e7dccc271db474a5350ffcfe4f265e704809b50..c4199e406cbc454695a7c41a2c2b786c3797adbd
  
  #include "xhci.h"
  
 +#ifdef CONFIG_MTK_XHCI
 +#include <asm/uaccess.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/platform_device.h>
 +#include <linux/xhci/xhci-mtk-scheduler.h>
 +#include <linux/xhci/xhci-mtk-power.h>
 +#include <linux/xhci/xhci-mtk.h>
 +
 +#ifdef CONFIG_USBIF_COMPLIANCE
 +#include <linux/proc_fs.h>
 +#include <asm/uaccess.h>
 +#include <linux/seq_file.h>
 +#include <linux/kobject.h>
 +#include <linux/miscdevice.h>
 +
 +static struct miscdevice mu3h_uevent_device = {
 +      .minor = MISC_DYNAMIC_MINOR,
 +      .name = "usbif_u3h_uevent",
 +      .fops = NULL,
 +};
 +#endif
 +#endif
 +
  #define DRIVER_AUTHOR "Sarah Sharp"
  #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
  
@@@ -61,23 -38,6 +61,23 @@@ static int link_quirk
  module_param(link_quirk, int, S_IRUGO | S_IWUSR);
  MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
  
 +#ifdef CONFIG_USBIF_COMPLIANCE
 +int usbif_u3h_send_event(char *event)
 +{
 +      char udev_event[128];
 +      char *envp[] = { udev_event, NULL };
 +      char *path;
 +      int ret;
 +
 +      snprintf(udev_event, sizeof(udev_event), "USBIF_EVENT=%s", event);
 +      path = kobject_get_path(&mu3h_uevent_device.this_device->kobj, GFP_KERNEL);
 +      printk(KERN_INFO "usbif_u3h_send_event: sending %s in %s\n", udev_event, path);
 +      kfree(path);    /* kobject_get_path() returns a kmalloc'd string */
 +      ret = kobject_uevent_env(&mu3h_uevent_device.this_device->kobj, KOBJ_CHANGE, envp);
 +      if (ret < 0)
 +              printk(KERN_ERR "usbif_u3h_send_event: failed, ret = %d\n", ret);
 +
 +      return ret;
 +}
 +#endif
 +
  /* TODO: copied from ehci-hcd.c - can this be refactored? */
  /*
   * xhci_handshake - spin reading hc until handshake completes or fails
@@@ -179,7 -139,8 +179,8 @@@ static int xhci_start(struct xhci_hcd *
                                "waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
-               xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+               /* Clear the state flags: dying, halted and removing. */
+               xhci->xhc_state = 0;
  
        return ret;
  }
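Zeroing xhc_state clears every flag at once; with the three state bits this tree defines (see xhci.h later in this diff) the masking form is equivalent today, shown here only as an illustration:

    /* Illustrative equivalent of xhci->xhc_state = 0 with today's flags: */
    xhci->xhc_state &= ~(XHCI_STATE_DYING | XHCI_STATE_HALTED |
                         XHCI_STATE_REMOVING);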
@@@ -562,7 -523,6 +563,7 @@@ int xhci_init(struct usb_hcd *hcd
        } else {
                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
        }
 +
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");
  
@@@ -635,7 -595,6 +636,7 @@@ static int xhci_run_finished(struct xhc
                xhci_halt(xhci);
                return -ENODEV;
        }
 +
        xhci->shared_hcd->state = HC_STATE_RUNNING;
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
  
                xhci_ring_cmd_db(xhci);
  
        xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
 +
        return 0;
  }
  
@@@ -790,10 -748,10 +791,10 @@@ void xhci_stop(struct usb_hcd *hcd
                xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
                                __func__);
        }
 -
 +#ifndef CONFIG_MTK_XHCI
        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();
 -
 +#endif
        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@@ -1641,11 -1599,6 +1642,11 @@@ int xhci_drop_endpoint(struct usb_hcd *
        u32 drop_flag;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        int ret;
 +#ifdef CONFIG_MTK_XHCI
 +      struct sch_ep *sch_ep = NULL;
 +      int isTT;
 +      int ep_type = 0;
 +#endif
  
        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0)
  
        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
  
 +#ifdef CONFIG_MTK_XHCI
 +      slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[udev->slot_id]->out_ctx);
 +      isTT = (slot_ctx->tt_info & 0xff) > 0;
 +      if (usb_endpoint_xfer_int(&ep->desc))
 +              ep_type = USB_EP_INT;
 +      else if (usb_endpoint_xfer_isoc(&ep->desc))
 +              ep_type = USB_EP_ISOC;
 +      else if (usb_endpoint_xfer_bulk(&ep->desc))
 +              ep_type = USB_EP_BULK;
 +      sch_ep = mtk_xhci_scheduler_remove_ep(udev->speed,
 +              usb_endpoint_dir_in(&ep->desc), isTT, ep_type, (mtk_u32 *)ep);
 +      if (sch_ep != NULL)
 +              kfree(sch_ep);
 +      else
 +              xhci_warn(xhci, "[MTK] no sch_ep instance found when removing endpoint\n");
 +#endif
 +
        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
 +
 +      #if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
 +      mtk_ep_count_dec();
 +      #endif
 +
        return 0;
  }
  
@@@ -1764,16 -1685,6 +1765,16 @@@ int xhci_add_endpoint(struct usb_hcd *h
        u32 new_add_flags, new_drop_flags, new_slot_info;
        struct xhci_virt_device *virt_dev;
        int ret = 0;
 +#ifdef CONFIG_MTK_XHCI
 +      struct xhci_ep_ctx *in_ep_ctx;
 +      struct sch_ep *sch_ep;
 +      int isTT;
 +      int ep_type = 0;
 +      int maxp = 0;
 +      int burst = 0;
 +      int mult = 0;
 +      int interval = 0;
 +#endif
  
        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0) {
                return -ENOMEM;
        }
  
 +#ifdef CONFIG_MTK_XHCI
 +      in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
 +      slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
 +
 +      isTT = (slot_ctx->tt_info & 0xff) > 0;
 +      if (usb_endpoint_xfer_int(&ep->desc))
 +              ep_type = USB_EP_INT;
 +      else if (usb_endpoint_xfer_isoc(&ep->desc))
 +              ep_type = USB_EP_ISOC;
 +      else if (usb_endpoint_xfer_bulk(&ep->desc))
 +              ep_type = USB_EP_BULK;
 +      if (udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH ||
 +          udev->speed == USB_SPEED_LOW) {
 +              maxp = ep->desc.wMaxPacketSize & 0x7FF;
 +              burst = ep->desc.wMaxPacketSize >> 11;
 +              mult = 0;
 +      } else if (udev->speed == USB_SPEED_SUPER) {
 +              maxp = ep->desc.wMaxPacketSize & 0x7FF;
 +              burst = ep->ss_ep_comp.bMaxBurst;
 +              mult = ep->ss_ep_comp.bmAttributes & 0x3;
 +      }
 +      interval = 1 << ((in_ep_ctx->ep_info >> 16) & 0xff);
 +      sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL);
 +      if (!sch_ep)
 +              return -ENOMEM;
 +      if (mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc),
 +              isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep,
 +              (mtk_u32 *)in_ep_ctx, sch_ep) != SCH_SUCCESS) {
 +              xhci_err(xhci, "[MTK] not enough bandwidth\n");
 +              kfree(sch_ep);  /* don't leak the entry on a failed add */
 +              return -ENOSPC;
 +      }
 +#endif
 +
        ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
  
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
 +
 +      #if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
 +      mtk_ep_count_inc();
 +      #endif
 +
        return 0;
  }
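The FS/HS branch above splits wMaxPacketSize by hand. An annotated restatement (variable names hypothetical; the descriptor field is little-endian on the wire, so portable code would add the le16_to_cpu() this LE-only vendor code skips):

    u16 wmax  = le16_to_cpu(ep->desc.wMaxPacketSize);
    int maxp  = wmax & 0x7ff;        /* bits 10:0  - max packet size         */
    int burst = (wmax >> 11) & 0x3;  /* bits 12:11 - additional transactions */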
  
@@@ -2828,7 -2694,8 +2829,8 @@@ int xhci_check_bandwidth(struct usb_hc
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
-       if (xhci->xhc_state & XHCI_STATE_DYING)
+       if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+               (xhci->xhc_state & XHCI_STATE_REMOVING))
                return -ENODEV;
  
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@@ -3649,9 -3516,7 +3651,9 @@@ void xhci_free_dev(struct usb_hcd *hcd
  {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_virt_device *virt_dev;
 +#ifndef CONFIG_USB_DEFAULT_PERSIST
        struct device *dev = hcd->self.controller;
 +#endif
        unsigned long flags;
        u32 state;
        int i, ret;
@@@ -4864,12 -4729,6 +4866,12 @@@ int xhci_gen_setup(struct usb_hcd *hcd
                return 0;
        }
  
 +#ifdef CONFIG_MTK_XHCI
 +      retval = mtk_xhci_ip_init(hcd, xhci);
 +      if(retval)
 +              goto error;
 +#endif
 +
        xhci->cap_regs = hcd->regs;
        xhci->op_regs = hcd->regs +
                HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
        if (retval)
                goto error;
        xhci_dbg(xhci, "Called HCD init\n");
 +
 +      printk(KERN_DEBUG "%s: do mtk_xhci_set\n", __func__);
 +
        return 0;
  error:
        kfree(xhci);
@@@ -4932,189 -4788,6 +4934,189 @@@ MODULE_DESCRIPTION(DRIVER_DESC)
  MODULE_AUTHOR(DRIVER_AUTHOR);
  MODULE_LICENSE("GPL");
  
 +#ifdef CONFIG_USBIF_COMPLIANCE
 +#ifndef CONFIG_USB_MTK_DUALMODE
 +static int xhci_hcd_driver_init(void)
 +{
 +      int retval;
 +
 +      retval = xhci_register_pci();
 +      if (retval < 0) {
 +              printk(KERN_DEBUG "Problem registering PCI driver.\n");
 +              return retval;
 +      }
 +
 +#ifdef CONFIG_MTK_XHCI
 +      mtk_xhci_ip_init();
 +#endif
 +
 +      retval = xhci_register_plat();
 +      if (retval < 0) {
 +              printk(KERN_DEBUG "Problem registering platform driver.\n");
 +              goto unreg_pci;
 +      }
 +
 +#ifdef CONFIG_MTK_XHCI
 +      retval = xhci_attrs_init();
 +      if (retval < 0) {
 +              printk(KERN_DEBUG "Problem creating xhci attributes.\n");
 +              goto unreg_plat;
 +      }
 +
 +      mtk_xhci_wakelock_init();
 +#endif
 +
 +      /*
 +       * Check the compiler generated sizes of structures that must be laid
 +       * out in specific ways for hardware access.
 +       */
 +      BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
 +      /* xhci_device_control has eight fields, and also
 +       * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
 +       */
 +      BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
 +      BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
 +      BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
 +      /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
 +      BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
 +      return 0;
 +
 +#ifdef CONFIG_MTK_XHCI
 +unreg_plat:
 +      xhci_unregister_plat();
 +#endif
 +unreg_pci:
 +      xhci_unregister_pci();
 +      return retval;
 +}
 +
 +static void xhci_hcd_driver_cleanup(void)
 +{
 +      xhci_unregister_pci();
 +      xhci_unregister_plat();
 +      xhci_attrs_exit();
 +}
 +#else
 +static int xhci_hcd_driver_init(void)
 +{
 +      /* initialized in mt_devs.c */
 +      mtk_xhci_eint_iddig_init();
 +      mtk_xhci_switch_init();
 +      /* mtk_xhci_wakelock_init(); */
 +      return 0;
 +}
 +
 +static void xhci_hcd_driver_cleanup(void)
 +{
 +      mtk_xhci_eint_iddig_deinit();
 +}
 +
 +#endif
 +
 +static int mu3h_normal_driver_on;
 +
 +static int xhci_mu3h_proc_show(struct seq_file *seq, void *v)
 +{
 +      seq_printf(seq, "xhci_mu3h_proc_show: mu3h is %d (on:1, off:0)\n",
 +                 mu3h_normal_driver_on);
 +      return 0;
 +}
 +
 +static int xhci_mu3h_proc_open(struct inode *inode, struct file *file)
 +{
 +      return single_open(file, xhci_mu3h_proc_show, inode->i_private);
 +}
 +
 +static ssize_t xhci_mu3h_proc_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos)
 +{
 +      char msg[32];
 +
 +      if (length >= sizeof(msg)) {
 +              printk(KERN_ERR "xhci_mu3h_proc_write: length %zu is too long\n", length);
 +              return -EINVAL;
 +      }
 +      if (copy_from_user(msg, buf, length))
 +              return -EFAULT;
 +
 +      msg[length] = 0;
 +
 +      printk(KERN_INFO "xhci_mu3h_proc_write: %s, current driver on/off: %d\n", msg, mu3h_normal_driver_on);
 +
 +      if ((msg[0] == '1') && (mu3h_normal_driver_on == 0)) {
 +              xhci_hcd_driver_init();
 +              mu3h_normal_driver_on = 1;
 +              printk(KERN_INFO "registered the mu3h xhci driver\n");
 +      } else if ((msg[0] == '0') && (mu3h_normal_driver_on == 1)) {
 +              xhci_hcd_driver_cleanup();
 +              mu3h_normal_driver_on = 0;
 +              printk(KERN_INFO "unregistered the mu3h xhci driver\n");
 +      } else {
 +              printk(KERN_WARNING "xhci_mu3h_proc_write: invalid input\n");
 +      }
 +      return length;
 +}
 +
 +static const struct file_operations mu3h_proc_fops = {
 +      .owner = THIS_MODULE,
 +      .open = xhci_mu3h_proc_open,
 +      .write = xhci_mu3h_proc_write,
 +      .read = seq_read,
 +      .llseek = seq_lseek,
 +};
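With these fops registered, writing '1' to /proc/mu3h_driver_init runs xhci_hcd_driver_init() and '0' runs xhci_hcd_driver_cleanup(), as implemented in xhci_mu3h_proc_write() above.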
 +
 +static int __init xhci_hcd_init(void)
 +{
 +      struct proc_dir_entry *prEntry;
 +
 +      printk(KERN_DEBUG "xhci_hcd_init\n");
 +
 +      /* set xhci up at boot */
 +      xhci_hcd_driver_init();
 +      mtk_xhci_wakelock_init();
 +      mu3h_normal_driver_on = 1;
 +
 +      /* USBIF */
 +      prEntry = proc_create("mu3h_driver_init", 0666, NULL, &mu3h_proc_fops);
 +      if (prEntry)
 +              printk(KERN_INFO "created the mu3h init proc entry\n");
 +      else
 +              printk(KERN_ERR "failed to create the mu3h init proc entry\n");
 +
 +#ifdef CONFIG_MTK_XHCI
 +      if (!misc_register(&mu3h_uevent_device))
 +              printk(KERN_INFO "registered the mu3h_uevent_device misc device\n");
 +      else
 +              printk(KERN_ERR "failed to register the mu3h_uevent_device misc device\n");
 +#endif
 +
 +      return 0;
 +}
 +module_init(xhci_hcd_init);
 +
 +static void __exit xhci_hcd_cleanup(void)
 +{
 +#ifdef CONFIG_MTK_XHCI
 +      misc_deregister(&mu3h_uevent_device);
 +#endif
 +      printk(KERN_DEBUG "xhci_hcd_cleanup\n");
 +}
 +module_exit(xhci_hcd_cleanup);
 +
 +#else
 +#ifndef CONFIG_USB_MTK_DUALMODE
  static int __init xhci_hcd_init(void)
  {
        int retval;
                printk(KERN_DEBUG "Problem registering platform driver.");
                goto unreg_pci;
        }
 +
 +#ifdef CONFIG_MTK_XHCI
 +      retval = xhci_attrs_init();
 +      if (retval < 0) {
 +              printk(KERN_DEBUG "Problem creating xhci attributes.\n");
 +              goto unreg_plat;
 +      }
 +
 +      mtk_xhci_wakelock_init();
 +#endif
 +
        /*
         * Check the compiler generated sizes of structures that must be laid
         * out in specific ways for hardware access.
        BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
  
        return 0;
 +
 +#ifdef CONFIG_MTK_XHCI
 +unreg_plat:
 +      xhci_unregister_plat();
 +#endif
  unreg_pci:
        xhci_unregister_pci();
        return retval;
@@@ -5177,23 -4834,5 +5179,23 @@@ static void __exit xhci_hcd_cleanup(voi
  {
        xhci_unregister_pci();
        xhci_unregister_plat();
 +      xhci_attrs_exit();
  }
  module_exit(xhci_hcd_cleanup);
 +#else
 +static int __init xhci_hcd_init(void)
 +{
 +      mtk_xhci_eint_iddig_init();
 +      mtk_xhci_switch_init();
 +      mtk_xhci_wakelock_init();
 +      return 0;
 +}
 +module_init(xhci_hcd_init);
 +
 +static void __exit xhci_hcd_cleanup(void)
 +{
 +}
 +module_exit(xhci_hcd_cleanup);
 +
 +#endif
 +#endif
diff --combined drivers/usb/host/xhci.h
index 6d5839213405f87bacfe36d75a6e67da5bb68339,15e796faa0a888fd08c166f2f6a62bdc74df0e22..29eec48e4b614e8bc12d0112ca8a293ceed4a45b
@@@ -33,7 -33,6 +33,7 @@@
  #include      "xhci-ext-caps.h"
  #include "pci-quirks.h"
  
 +
  /* xHCI PCI Configuration Registers */
  #define XHCI_SBRN_OFFSET      (0x60)
  
@@@ -681,14 -680,6 +681,14 @@@ struct xhci_ep_ctx 
  /* deq bitmasks */
  #define EP_CTX_CYCLE_MASK             (1 << 0)
  
 +#ifdef CONFIG_MTK_XHCI
 +/* mtk scheduler bitmasks */
 +#define BPKTS(p)      ((p) & 0x3f)
 +#define BCSCOUNT(p)   (((p) & 0x7) << 8)
 +#define BBM(p)                ((p) << 11)
 +#define BOFFSET(p)    ((p) & 0x3fff)
 +#define BREPEAT(p)    (((p) & 0x7fff) << 16)
 +#endif
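Judging purely by the shift layout, the first three macros pack into one scheduler dword and the last two into another; an illustrative, unverified use (all variable names hypothetical):

    /* Assumed packing, inferred only from the shifts above: */
    u32 dw0 = BPKTS(pkts) | BCSCOUNT(cs_count) | BBM(burst_mode);
    u32 dw1 = BOFFSET(offset) | BREPEAT(repeat);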
  
  /**
   * struct xhci_input_control_context
@@@ -1422,12 -1413,6 +1422,12 @@@ struct xhci_hcd 
        /* Our HCD's current interrupter register set */
        struct  xhci_intr_reg __iomem *ir_set;
  
 +      #ifdef CONFIG_MTK_XHCI
 +      unsigned long base_regs;
 +      unsigned long sif_regs;
 +      unsigned long sif2_regs;
 +      #endif
 +
        /* Cached register copies of read-only HC data */
        __u32           hcs_params1;
        __u32           hcs_params2;
   */
  #define XHCI_STATE_DYING      (1 << 0)
  #define XHCI_STATE_HALTED     (1 << 1)
+ #define XHCI_STATE_REMOVING   (1 << 2)
        /* Statistics */
        int                     error_bitmask;
        unsigned int            quirks;
  #define       XHCI_LINK_TRB_QUIRK     (1 << 0)
  #define XHCI_RESET_EP_QUIRK   (1 << 1)
  #define XHCI_NEC_HOST         (1 << 2)
 +#ifndef CONFIG_MTK_XHCI
  #define XHCI_AMD_PLL_FIX      (1 << 3)
 +#endif
  #define XHCI_SPURIOUS_SUCCESS (1 << 4)
  /*
   * Certain Intel host controllers have a limit to the number of endpoint
  #define XHCI_BROKEN_MSI               (1 << 6)
  #define XHCI_RESET_ON_RESUME  (1 << 7)
  #define       XHCI_SW_BW_CHECKING     (1 << 8)
 +#ifndef CONFIG_MTK_XHCI
  #define XHCI_AMD_0x96_HOST    (1 << 9)
 +#endif
  #define XHCI_TRUST_TX_LENGTH  (1 << 10)
  #define XHCI_LPM_SUPPORT      (1 << 11)
  #define XHCI_INTEL_HOST               (1 << 12)
@@@ -1595,12 -1577,12 +1596,12 @@@ static inline struct usb_hcd *xhci_to_h
  /* TODO: copied from ehci.h - can be refactored? */
  /* xHCI spec says all registers are little endian */
  static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
 -              __le32 __iomem *regs)
 +              void __iomem *regs)
  {
        return readl(regs);
  }
  static inline void xhci_writel(struct xhci_hcd *xhci,
 -              const unsigned int val, __le32 __iomem *regs)
 +              const unsigned int val, void __iomem *regs)
  {
        writel(val, regs);
  }
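Widening the register argument to void __iomem * lets callers pass any register-block pointer without casts; existing call sites compile unchanged, for example the status-register pair from xhci_stop() earlier in this diff:

    temp = xhci_readl(xhci, &xhci->op_regs->status);
    xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);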
diff --combined fs/ext4/inode.c
index 2c6c5e1654caf8aea27abe7bef399e64cfe2595a,1095d77c2a9da326b5596a57d154ea719ac50b14..c15bdac6eaa8386f69a73411d326296af3b89d9a
@@@ -46,7 -46,6 +46,7 @@@
  #include "truncate.h"
  
  #include <trace/events/ext4.h>
 +#include <linux/blkdev.h>
  
  #define MPAGE_DA_EXTENT_TAIL 0x01
  
@@@ -74,10 -73,9 +74,9 @@@ static __u32 ext4_inode_csum(struct ino
                        csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
                                           csum_size);
                        offset += csum_size;
-                       csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
-                                          EXT4_INODE_SIZE(inode->i_sb) -
-                                          offset);
                }
+               csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+                                  EXT4_INODE_SIZE(inode->i_sb) - offset);
        }
  
        return csum;
@@@ -1005,13 -1003,6 +1004,13 @@@ static int ext4_write_begin(struct fil
        struct page *page;
        pgoff_t index;
        unsigned from, to;
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +              extern unsigned char *page_logger;
 +              struct page_pid_logger *tmp_logger;
 +              unsigned long page_index;
 +              extern spinlock_t g_locker;
 +              unsigned long g_flags;
 +#endif
  
        trace_ext4_write_begin(inode, pos, len, flags);
        /*
@@@ -1107,24 -1098,6 +1106,24 @@@ retry_journal
                return ret;
        }
        *pagep = page;
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +              if (page_logger && (*pagep)) {
 +                      page_index = (unsigned long)(__page_to_pfn(*pagep)) - PHYS_PFN_OFFSET;
 +                      tmp_logger = ((struct page_pid_logger *)page_logger) + page_index;
 +                      spin_lock_irqsave(&g_locker, g_flags);
 +                      if (page_index < num_physpages) {
 +                              if (tmp_logger->pid1 == 0xFFFF)
 +                                      tmp_logger->pid1 = current->pid;
 +                              else if (tmp_logger->pid1 != current->pid)
 +                                      tmp_logger->pid2 = current->pid;
 +                      }
 +                      spin_unlock_irqrestore(&g_locker, g_flags);
 +              }
 +#endif
        return ret;
  }
  
@@@ -3653,7 -3626,6 +3652,7 @@@ int ext4_can_truncate(struct inode *ino
  
  int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
  {
 +#if 0
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        ext4_lblk_t first_block, stop_block;
@@@ -3839,12 -3811,6 +3838,12 @@@ out_dio
  out_mutex:
        mutex_unlock(&inode->i_mutex);
        return ret;
 +#else
 +      /*
 +       * Disabled as per b/28760453
 +       */
 +      return -EOPNOTSUPP;
 +#endif
  }
  
  /*
@@@ -4107,10 -4073,6 +4106,10 @@@ make_io
                trace_ext4_load_inode(inode);
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
 +#ifdef FEATURE_STORAGE_META_LOG
 +              if (bh && bh->b_bdev && bh->b_bdev->bd_disk)
 +                      set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, WAIT_READ_CNT);
 +#endif
                submit_bh(READ | REQ_META | REQ_PRIO, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
diff --combined fs/ext4/mballoc.c
index 61cc29fef63eb3d4e35978ad7a49a40ab2a5d5ea,cba1fc678eecf7dab8b8b589dc2851129c543b6f..cb8072a050ea8719aa7d7a34453c25eaf359c36b
@@@ -2718,8 -2718,7 +2718,8 @@@ int ext4_mb_release(struct super_block 
  }
  
  static inline int ext4_issue_discard(struct super_block *sb,
 -              ext4_group_t block_group, ext4_grpblk_t cluster, int count)
 +              ext4_group_t block_group, ext4_grpblk_t cluster, int count,
 +              unsigned long flags)
  {
        ext4_fsblk_t discard_block;
  
        count = EXT4_C2B(EXT4_SB(sb), count);
        trace_ext4_discard_blocks(sb,
                        (unsigned long long) discard_block, count);
 -      return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 +      return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
  }
  
  /*
@@@ -2750,7 -2749,7 +2750,7 @@@ static void ext4_free_data_callback(str
        if (test_opt(sb, DISCARD)) {
                err = ext4_issue_discard(sb, entry->efd_group,
                                         entry->efd_start_cluster,
 -                                       entry->efd_count);
 +                                       entry->efd_count, 0);
                if (err && err != -EOPNOTSUPP)
                        ext4_msg(sb, KERN_WARNING, "discard request in"
                                 " group:%d block:%d count:%d failed"
@@@ -3064,6 -3063,13 +3064,13 @@@ ext4_mb_normalize_request(struct ext4_a
        if (ar->pright && start + size - 1 >= ar->lright)
                size -= start + size - ar->lright;
  
+       /*
+        * Trim allocation request for filesystems with artificially small
+        * groups.
+        */
+       if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
+               size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
        end = start + size;
  
        /* check we don't cross already preallocated blocks */
@@@ -4787,8 -4793,7 +4794,8 @@@ do_more
                 * them with group lock_held
                 */
                if (test_opt(sb, DISCARD)) {
 -                      err = ext4_issue_discard(sb, block_group, bit, count);
 +                      err = ext4_issue_discard(sb, block_group, bit, count,
 +                                               0);
                        if (err && err != -EOPNOTSUPP)
                                ext4_msg(sb, KERN_WARNING, "discard request in"
                                         " group:%d block:%d count:%lu failed"
@@@ -4995,15 -5000,13 +5002,15 @@@ error_return
   * @count:    number of blocks to TRIM
   * @group:    alloc. group we are working with
   * @e4b:      ext4 buddy for the group
 + * @blkdev_flags: flags for the block device
   *
   * Trim "count" blocks starting at "start" in the "group". To assure that no
   * one will allocate those blocks, mark it as used in buddy bitmap. This must
   * be called with under the group lock.
   */
  static int ext4_trim_extent(struct super_block *sb, int start, int count,
 -                           ext4_group_t group, struct ext4_buddy *e4b)
 +                          ext4_group_t group, struct ext4_buddy *e4b,
 +                          unsigned long blkdev_flags)
  {
        struct ext4_free_extent ex;
        int ret = 0;
         */
        mb_mark_used(e4b, &ex);
        ext4_unlock_group(sb, group);
 -      ret = ext4_issue_discard(sb, group, start, count);
 +      ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
        ext4_lock_group(sb, group);
        mb_free_blocks(NULL, e4b, start, ex.fe_len);
        return ret;
   * @start:            first group block to examine
   * @max:              last group block to examine
   * @minblocks:                minimum extent block count
 + * @blkdev_flags:     flags for the block device
   *
   * ext4_trim_all_free walks through group's buddy bitmap searching for free
   * extents. When the free block is found, ext4_trim_extent is called to TRIM
  static ext4_grpblk_t
  ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
                   ext4_grpblk_t start, ext4_grpblk_t max,
 -                 ext4_grpblk_t minblocks)
 +                 ext4_grpblk_t minblocks, unsigned long blkdev_flags)
  {
        void *bitmap;
        ext4_grpblk_t next, count = 0, free_count = 0;
  
                if ((next - start) >= minblocks) {
                        ret = ext4_trim_extent(sb, start,
 -                                             next - start, group, &e4b);
 +                                             next - start, group, &e4b,
 +                                             blkdev_flags);
                        if (ret && ret != -EOPNOTSUPP)
                                break;
                        ret = 0;
@@@ -5126,7 -5127,6 +5133,7 @@@ out
   * ext4_trim_fs() -- trim ioctl handle function
   * @sb:                       superblock for filesystem
   * @range:            fstrim_range structure
 + * @blkdev_flags:     flags for the block device
   *
   * start:     First Byte to trim
   * len:               number of Bytes to trim from start
   * start to start+len. For each such a group ext4_trim_all_free function
   * is invoked to trim all free space.
   */
 -int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 +int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
 +                      unsigned long blkdev_flags)
  {
        struct ext4_group_info *grp;
        ext4_group_t group, first_group, last_group;
  
                if (grp->bb_free >= minlen) {
                        cnt = ext4_trim_all_free(sb, group, first_cluster,
 -                                              end, minlen);
 +                                              end, minlen, blkdev_flags);
                        if (cnt < 0) {
                                ret = cnt;
                                break;
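The new blkdev_flags parameter is threaded from ext4_trim_fs() all the way down to sb_issue_discard(). A hypothetical caller wanting a secure TRIM (an assumption about what the plumbing is for) could now pass the stock block-layer flag:

    ret = ext4_trim_fs(sb, &range, BLKDEV_DISCARD_SECURE);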
diff --combined fs/ext4/super.c
index 5e3532e985b7b82a4db0c80a230a71a1bc571994,1fe383f22ab1798dabcb6e8d9ec8f383358d3cea..53829a8d1f95f4c4fe4e53d03cbf07a291decfa3
@@@ -753,6 -753,7 +753,7 @@@ static void ext4_put_super(struct super
  {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
+       int aborted = 0;
        int i, err;
  
        ext4_unregister_li_request(sb);
        destroy_workqueue(sbi->dio_unwritten_wq);
  
        if (sbi->s_journal) {
+               aborted = is_journal_aborted(sbi->s_journal);
                err = jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
-               if (err < 0)
+               if ((err < 0) && !aborted)
                        ext4_abort(sb, "Couldn't clean up the journal");
        }
  
        ext4_ext_release(sb);
        ext4_xattr_put_super(sb);
  
-       if (!(sb->s_flags & MS_RDONLY)) {
+       if (!(sb->s_flags & MS_RDONLY) && !aborted) {
                EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
@@@ -2846,7 -2848,6 +2848,7 @@@ static int ext4_lazyinit_thread(void *a
        unsigned long next_wakeup, cur;
  
        BUG_ON(NULL == eli);
 +      set_freezable();
  
  cont_thread:
        while (true) {
  
                schedule_timeout_interruptible(next_wakeup - cur);
  
 -              if (kthread_should_stop()) {
 +              if (kthread_freezable_should_stop(NULL)) {
                        ext4_clear_request_list();
                        goto exit_thread;
                }
@@@ -3186,10 -3187,15 +3188,15 @@@ static int count_overhead(struct super_
                        ext4_set_bit(s++, buf);
                        count++;
                }
-               for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
-                       ext4_set_bit(EXT4_B2C(sbi, s++), buf);
-                       count++;
+               j = ext4_bg_num_gdb(sb, grp);
+               if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+                       ext4_error(sb, "Invalid number of block group "
+                                  "descriptor blocks: %d", j);
+                       j = EXT4_BLOCKS_PER_GROUP(sb) - s;
                }
+               count += j;
+               for (; j > 0; j--)
+                       ext4_set_bit(EXT4_B2C(sbi, s++), buf);
        }
        if (!count)
                return 0;
@@@ -3292,7 -3298,7 +3299,7 @@@ static int ext4_fill_super(struct super
        char *orig_data = kstrdup(data, GFP_KERNEL);
        struct buffer_head *bh;
        struct ext4_super_block *es = NULL;
-       struct ext4_sb_info *sbi;
+       struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        ext4_fsblk_t block;
        ext4_fsblk_t sb_block = get_sb_block(&data);
        ext4_fsblk_t logical_sb_block;
        unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
        ext4_group_t first_not_zeroed;
  
-       sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-       if (!sbi)
-               goto out_free_orig;
+       if ((data && !orig_data) || !sbi)
+               goto out_free_base;
  
        sbi->s_blockgroup_lock =
                kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
-       if (!sbi->s_blockgroup_lock) {
-               kfree(sbi);
-               goto out_free_orig;
-       }
+       if (!sbi->s_blockgroup_lock)
+               goto out_free_base;
        sb->s_fs_info = sbi;
        sbi->s_sb = sb;
        sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
         */
        sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
  
-       if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
-                          &journal_devnum, &journal_ioprio, 0)) {
-               ext4_msg(sb, KERN_WARNING,
-                        "failed to parse options in superblock: %s",
-                        sbi->s_es->s_mount_opts);
+       if (sbi->s_es->s_mount_opts[0]) {
+               char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+                                             sizeof(sbi->s_es->s_mount_opts),
+                                             GFP_KERNEL);
+               if (!s_mount_opts)
+                       goto failed_mount;
+               if (!parse_options(s_mount_opts, sb, &journal_devnum,
+                                  &journal_ioprio, 0)) {
+                       ext4_msg(sb, KERN_WARNING,
+                                "failed to parse options in superblock: %s",
+                                s_mount_opts);
+               }
+               kfree(s_mount_opts);
        }
        sbi->s_def_mount_opt = sbi->s_mount_opt;
        if (!parse_options((char *) data, sb, &journal_devnum,
  
        sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
        sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
-       if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
-               goto cantfind_ext4;
  
        sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
        if (sbi->s_inodes_per_block == 0)
                goto cantfind_ext4;
+       if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+           sbi->s_inodes_per_group > blocksize * 8) {
+               ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+                        sbi->s_inodes_per_group);
+               goto failed_mount;
+       }
        sbi->s_itb_per_group = sbi->s_inodes_per_group /
                                        sbi->s_inodes_per_block;
        sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
        }
        sbi->s_cluster_ratio = clustersize / blocksize;
  
-       if (sbi->s_inodes_per_group > blocksize * 8) {
-               ext4_msg(sb, KERN_ERR,
-                      "#inodes per group too big: %lu",
-                      sbi->s_inodes_per_group);
-               goto failed_mount;
-       }
        /* Do we have standard group size of clustersize * 8 blocks ? */
        if (sbi->s_blocks_per_group == clustersize << 3)
                set_opt2(sb, STD_GROUP_SIZE);
                        (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
        db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
                   EXT4_DESC_PER_BLOCK(sb);
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG)) {
+               if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
+                       ext4_msg(sb, KERN_WARNING,
+                                "first meta block group too large: %u "
+                                "(group descriptor block count %u)",
+                                le32_to_cpu(es->s_first_meta_bg), db_count);
+                       goto failed_mount;
+               }
+       }
        sbi->s_group_desc = ext4_kvmalloc(db_count *
                                          sizeof(struct buffer_head *),
                                          GFP_KERNEL);
         */
        if (!test_opt(sb, NOLOAD) &&
            EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
-               if (ext4_load_journal(sb, es, journal_devnum))
+               err = ext4_load_journal(sb, es, journal_devnum);
+               if (err)
                        goto failed_mount3;
        } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
              EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
@@@ -4100,7 -4119,9 +4120,9 @@@ no_journal
        }
  
        ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-                "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+                "Opts: %.*s%s%s", descr,
+                (int) sizeof(sbi->s_es->s_mount_opts),
+                sbi->s_es->s_mount_opts,
                 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
  
        if (es->s_error_count)
@@@ -4168,8 -4189,8 +4190,8 @@@ failed_mount
  out_fail:
        sb->s_fs_info = NULL;
        kfree(sbi->s_blockgroup_lock);
+ out_free_base:
        kfree(sbi);
- out_free_orig:
        kfree(orig_data);
        return err ? err : ret;
  }
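The %.*s bound in the mount message above matters because s_mount_opts is a fixed-size on-disk field with no guaranteed NUL terminator; the kstrndup() added to the parse path earlier in this hunk applies the same bound before parsing.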
diff --combined fs/fat/inode.c
index 871ea6a821dcb540f0257a329d5026d541841255,04708fab99aac43121e03548a5e5557f7a70571b..72abdad44c8904ddb8c4c814f6d76ce6b707917d
@@@ -157,40 -157,11 +157,40 @@@ static int fat_write_begin(struct file 
                        struct page **pagep, void **fsdata)
  {
        int err;
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +      extern unsigned char *page_logger;
 +      struct page_pid_logger *tmp_logger;
 +      unsigned long page_index;
 +      extern spinlock_t g_locker;
 +      unsigned long g_flags;
 +#endif
  
        *pagep = NULL;
        err = cont_write_begin(file, mapping, pos, len, flags,
                                pagep, fsdata, fat_get_block,
                                &MSDOS_I(mapping->host)->mmu_private);
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +      if (page_logger && (*pagep)) {
 +              page_index = (unsigned long)(__page_to_pfn(*pagep)) - PHYS_PFN_OFFSET;
 +              tmp_logger = ((struct page_pid_logger *)page_logger) + page_index;
 +              spin_lock_irqsave(&g_locker, g_flags);
 +              if (page_index < num_physpages) {
 +                      if (tmp_logger->pid1 == 0xFFFF)
 +                              tmp_logger->pid1 = current->pid;
 +                      else if (tmp_logger->pid1 != current->pid)
 +                              tmp_logger->pid2 = current->pid;
 +              }
 +              spin_unlock_irqrestore(&g_locker, g_flags);
 +      }
 +#endif
        if (err < 0)
                fat_write_failed(mapping, pos + len);
        return err;
@@@ -750,15 -721,7 +750,15 @@@ retry
        mark_buffer_dirty(bh);
        err = 0;
        if (wait)
 +      {
                err = sync_dirty_buffer(bh);
 +      } else {
 +#ifdef FEATURE_STORAGE_META_LOG
 +              if (bh && bh->b_bdev && bh->b_bdev->bd_disk)
 +                      set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, NOWAIT_WRITE_CNT);
 +#endif
 +      }
        brelse(bh);
        return err;
  }
        return 0;
  }
  
+ static void fat_dummy_inode_init(struct inode *inode)
+ {
+       /* Initialize this dummy inode to work as no-op. */
+       MSDOS_I(inode)->mmu_private = 0;
+       MSDOS_I(inode)->i_start = 0;
+       MSDOS_I(inode)->i_logstart = 0;
+       MSDOS_I(inode)->i_attrs = 0;
+       MSDOS_I(inode)->i_pos = 0;
+ }
  static int fat_read_root(struct inode *inode)
  {
        struct super_block *sb = inode->i_sb;
@@@ -1289,7 -1262,6 +1299,7 @@@ int fat_fill_super(struct super_block *
        struct inode *fsinfo_inode = NULL;
        struct buffer_head *bh;
        struct fat_boot_sector *b;
 +      struct fat_boot_bsx *bsx;
        struct msdos_sb_info *sbi;
        u16 logical_sector_size;
        u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
                        goto out_fail;
                }
  
 +              bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET);
 +
                fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
                if (!IS_FSINFO(fsinfo)) {
                        fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
                }
  
                brelse(fsinfo_bh);
 +      } else {
 +              bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET);
        }
  
 +      /* interpret volume ID as a little endian 32 bit integer */
 +      sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) |
 +              ((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24));
 +
        sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
        sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
  
        fat_inode = new_inode(sb);
        if (!fat_inode)
                goto out_fail;
-       MSDOS_I(fat_inode)->i_pos = 0;
+       fat_dummy_inode_init(fat_inode);
        sbi->fat_inode = fat_inode;
  
        fsinfo_inode = new_inode(sb);
        if (!fsinfo_inode)
                goto out_fail;
+       fat_dummy_inode_init(fsinfo_inode);
        fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
        sbi->fsinfo_inode = fsinfo_inode;
        insert_inode_hash(fsinfo_inode);
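The vol_id assembly earlier in this section is plain little-endian byte composition; a worked example with hypothetical on-disk bytes:

    u8 b[4] = { 0x78, 0x56, 0x34, 0x12 };                     /* hypothetical */
    u32 vol_id = b[0] | b[1] << 8 | b[2] << 16 | b[3] << 24;  /* 0x12345678   */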
diff --combined fs/nfs/nfs4proc.c
index 6b4689b70850dbfe09d1122037ca5b832b305a04,c1148e87d53339db1dca1b8cc86929e2fc84f882..bc3cb9d4f7d2ea276e94318d9d98ad852ea21a0c
@@@ -268,7 -268,7 +268,7 @@@ static int nfs4_delay(struct rpc_clnt *
                *timeout = NFS4_POLL_RETRY_MIN;
        if (*timeout > NFS4_POLL_RETRY_MAX)
                *timeout = NFS4_POLL_RETRY_MAX;
 -      freezable_schedule_timeout_killable(*timeout);
 +      freezable_schedule_timeout_killable_unsafe(*timeout);
        if (fatal_signal_pending(current))
                res = -ERESTARTSYS;
        *timeout <<= 1;
@@@ -4047,7 -4047,7 +4047,7 @@@ out
   */
  static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
  {
-       struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+       struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
        struct nfs_getaclargs args = {
                .fh = NFS_FH(inode),
                .acl_pages = pages,
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
+       unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
        int ret = -ENOMEM, i;
  
-       /* As long as we're doing a round trip to the server anyway,
-        * let's be prepared for a page of acl data. */
-       if (npages == 0)
-               npages = 1;
        if (npages > ARRAY_SIZE(pages))
                return -ERANGE;
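The extra page reserved above leaves room for the variable-length attribute bitmap that precedes the ACL data in the GETACL reply, so ACL data landing close to a page boundary no longer trips this ERANGE check.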
  
@@@ -4557,7 -4553,7 +4553,7 @@@ int nfs4_proc_delegreturn(struct inode 
  static unsigned long
  nfs4_set_lock_task_retry(unsigned long timeout)
  {
 -      freezable_schedule_timeout_killable(timeout);
 +      freezable_schedule_timeout_killable_unsafe(timeout);
        timeout <<= 1;
        if (timeout > NFS4_LOCK_MAXTIMEOUT)
                return NFS4_LOCK_MAXTIMEOUT;
diff --combined fs/proc/task_mmu.c
index 214687b505852f1efb518ede46c8d6e75c66a475,972cdc282b1a434d7b5e6332bc7783da8ca31c7c..3ced57ef25ebd1328fec217e937a86000dca1da1
@@@ -134,56 -134,6 +134,56 @@@ static void release_task_mempolicy(stru
  }
  #endif
  
 +static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
 +{
 +      const char __user *name = vma_get_anon_name(vma);
 +      struct mm_struct *mm = vma->vm_mm;
 +
 +      unsigned long page_start_vaddr;
 +      unsigned long page_offset;
 +      unsigned long num_pages;
 +      unsigned long max_len = NAME_MAX;
 +      int i;
 +
 +      page_start_vaddr = (unsigned long)name & PAGE_MASK;
 +      page_offset = (unsigned long)name - page_start_vaddr;
 +      num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
 +
 +      seq_puts(m, "[anon:");
 +
 +      for (i = 0; i < num_pages; i++) {
 +              int len;
 +              int write_len;
 +              const char *kaddr;
 +              long pages_pinned;
 +              struct page *page;
 +
 +              pages_pinned = get_user_pages(current, mm, page_start_vaddr,
 +                              1, 0, 0, &page, NULL);
 +              if (pages_pinned < 1) {
 +                      seq_puts(m, "<fault>]");
 +                      return;
 +              }
 +
 +              kaddr = (const char *)kmap(page);
 +              len = min(max_len, PAGE_SIZE - page_offset);
 +              write_len = strnlen(kaddr + page_offset, len);
 +              seq_write(m, kaddr + page_offset, write_len);
 +              kunmap(page);
 +              put_page(page);
 +
 +              /* if strnlen hit a null terminator then we're done */
 +              if (write_len != len)
 +                      break;
 +
 +              max_len -= len;
 +              page_offset = 0;
 +              page_start_vaddr += PAGE_SIZE;
 +      }
 +
 +      seq_putc(m, ']');
 +}
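The helper above emits the name as [anon:<name>]; the callers added further down print it into /proc/<pid>/maps and into the Name: field of smaps.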
 +
  static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
  {
        if (vma && vma != priv->tail_vma) {
@@@ -329,11 -279,7 +329,7 @@@ show_map_vma(struct seq_file *m, struc
  
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
-       if (stack_guard_page_start(vma, start))
-               start += PAGE_SIZE;
        end = vma->vm_end;
-       if (stack_guard_page_end(vma, end))
-               end -= PAGE_SIZE;
  
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
                                pad_len_spaces(m, len);
                                seq_printf(m, "[stack:%d]", tid);
                        }
 +                      goto done;
 +              }
 +
 +              if (vma_get_anon_name(vma)) {
 +                      pad_len_spaces(m, len);
 +                      seq_print_vma_name(m, vma);
                }
        }
  
@@@ -497,18 -437,8 +493,18 @@@ struct mem_size_stats 
        unsigned long swap;
        unsigned long nonlinear;
        u64 pss;
 +      u64 pswap;
  };
  
 +#ifdef CONFIG_SWAP
 +extern struct swap_info_struct *swap_info_get(swp_entry_t entry);
 +extern void swap_info_unlock(struct swap_info_struct *si);
 +#endif /* CONFIG_SWAP */
 +
 +static inline unsigned char swap_count(unsigned char ent)
 +{
 +      return ent & ~SWAP_HAS_CACHE;   /* may include SWAP_HAS_CONT flag */
 +}
  
  static void smaps_pte_entry(pte_t ptent, unsigned long addr,
                unsigned long ptent_size, struct mm_walk *walk)
        } else if (is_swap_pte(ptent)) {
                swp_entry_t swpent = pte_to_swp_entry(ptent);
  
 -              if (!non_swap_entry(swpent))
 +              if (!non_swap_entry(swpent)) {
 +#ifdef CONFIG_SWAP
 +                      swp_entry_t entry;
 +                      struct swap_info_struct *p;
 +#endif /* CONFIG_SWAP */
 +
                        mss->swap += ptent_size;
 -              else if (is_migration_entry(swpent))
 +
 +#ifdef CONFIG_SWAP
 +                      entry = pte_to_swp_entry(ptent);
 +                      if (non_swap_entry(entry))
 +                              return;
 +                      p = swap_info_get(entry);
 +                      if (p) {
 +                              if (swapcount == 0)
 +                                      swapcount = 1;
 +                              }
 +                              mss->pswap += (ptent_size << PSS_SHIFT) / swapcount;
 +                              swap_info_unlock(p);
 +                      }
 +#endif /* CONFIG_SWAP */
 +              } else if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
        } else if (pte_file(ptent)) {
                if (pte_to_pgoff(ptent) != pgoff)
@@@ -695,7 -605,6 +691,7 @@@ static int show_smap(struct seq_file *m
                   "Anonymous:      %8lu kB\n"
                   "AnonHugePages:  %8lu kB\n"
                   "Swap:           %8lu kB\n"
 +                 "PSwap:          %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n"
                   "Locked:         %8lu kB\n",
                   mss.anonymous >> 10,
                   mss.anonymous_thp >> 10,
                   mss.swap >> 10,
 +                 (unsigned long)(mss.pswap >> (10 + PSS_SHIFT)),
                   vma_kernel_pagesize(vma) >> 10,
                   vma_mmu_pagesize(vma) >> 10,
                   (vma->vm_flags & VM_LOCKED) ?
  
        show_smap_vma_flags(m, vma);
  
 +      if (vma_get_anon_name(vma)) {
 +              seq_puts(m, "Name:           ");
 +              seq_print_vma_name(m, vma);
 +              seq_putc(m, '\n');
 +      }
 +
        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task->mm))
                        ? vma->vm_start : 0;
diff --combined include/linux/mm.h
index 0cdc92331246ee44562c8b3f3074e2b8958b71df,d16f5243f95203551a0dea93ad5abbff0e8aa28b..9fe6bc3e5527821010e24c8bb10bce8c575a4788
@@@ -99,7 -99,6 +99,7 @@@ extern unsigned int kobjsize(const voi
  
  #define VM_DONTCOPY   0x00020000      /* Do not copy this vma on fork */
  #define VM_DONTEXPAND 0x00040000      /* Cannot expand with mremap() */
 +#define VM_RESERVED   0x00080000      /* Count as reserved_vm like IO */
  #define VM_ACCOUNT    0x00100000      /* Is a VM accounted object */
  #define VM_NORESERVE  0x00200000      /* should the VM suppress accounting */
  #define VM_HUGETLB    0x00400000      /* Huge TLB Page VM */
@@@ -326,8 -325,6 +326,8 @@@ static inline int is_vmalloc_or_module_
  }
  #endif
  
 +extern void kvfree(const void *addr);
 +
  static inline void compound_lock(struct page *page)
  {
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@@ -926,7 -923,6 +926,7 @@@ extern void pagefault_out_of_memory(voi
  extern void show_free_areas(unsigned int flags);
  extern bool skip_free_areas_node(unsigned int flags, int nid);
  
 +void shmem_set_file(struct vm_area_struct *vma, struct file *file);
  int shmem_zero_setup(struct vm_area_struct *);
  
  extern int can_do_mlock(void);
@@@ -1073,34 -1069,6 +1073,6 @@@ int set_page_dirty(struct page *page)
  int set_page_dirty_lock(struct page *page);
  int clear_page_dirty_for_io(struct page *page);
  
- /* Is the vma a continuation of the stack vma above it? */
- static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
- {
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
- }
- static inline int stack_guard_page_start(struct vm_area_struct *vma,
-                                            unsigned long addr)
- {
-       return (vma->vm_flags & VM_GROWSDOWN) &&
-               (vma->vm_start == addr) &&
-               !vma_growsdown(vma->vm_prev, addr);
- }
- /* Is the vma a continuation of the stack vma below it? */
- static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
- {
-       return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
- }
- static inline int stack_guard_page_end(struct vm_area_struct *vma,
-                                          unsigned long addr)
- {
-       return (vma->vm_flags & VM_GROWSUP) &&
-               (vma->vm_end == addr) &&
-               !vma_growsup(vma->vm_next, addr);
- }
  extern pid_t
  vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
  
@@@ -1505,7 -1473,7 +1477,7 @@@ extern int vma_adjust(struct vm_area_st
  extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
 -      struct mempolicy *);
 +      struct mempolicy *, const char __user *);
  extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
  extern int split_vma(struct mm_struct *,
        struct vm_area_struct *, unsigned long addr, int new_below);
@@@ -1626,6 -1594,7 +1598,7 @@@ unsigned long ra_submit(struct file_ra_
                        struct address_space *mapping,
                        struct file *filp);
  
+ extern unsigned long stack_guard_gap;
  /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
  extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
  
@@@ -1654,6 -1623,30 +1627,30 @@@ static inline struct vm_area_struct * f
        return vma;
  }
  
+ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+ {
+       unsigned long vm_start = vma->vm_start;
+       if (vma->vm_flags & VM_GROWSDOWN) {
+               vm_start -= stack_guard_gap;
+               if (vm_start > vma->vm_start)
+                       vm_start = 0;
+       }
+       return vm_start;
+ }
+ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+ {
+       unsigned long vm_end = vma->vm_end;
+       if (vma->vm_flags & VM_GROWSUP) {
+               vm_end += stack_guard_gap;
+               if (vm_end < vma->vm_end)
+                       vm_end = -PAGE_SIZE;
+       }
+       return vm_end;
+ }
  static inline unsigned long vma_pages(struct vm_area_struct *vma)
  {
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
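A worked example of the new gap helpers: with the default stack_guard_gap of 256 pages and 4 KiB pages, a grows-down stack whose vm_start is 0x7f0000100000 yields

    vm_start_gap(vma) == 0x7f0000000000  /* 256 * 4096 = 1 MiB below vm_start */

and the wrap check clamps the result to 0 if the subtraction underflows.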
@@@ -1767,7 -1760,6 +1764,7 @@@ int drop_caches_sysctl_handler(struct c
  unsigned long shrink_slab(struct shrink_control *shrink,
                          unsigned long nr_pages_scanned,
                          unsigned long lru_pages);
 +void drop_pagecache(void);
  
  #ifndef CONFIG_MMU
  #define randomize_va_space 0
diff --combined ipc/shm.c
index cacdc2badae65cef549f2e3ab57f7b822a82aee7,ddfad445242cd1d07e9de25a91121c6c92fa5fe8..26d7bbf92249202f056dc8a2c0521b88bed572ff
+++ b/ipc/shm.c
@@@ -538,7 -538,7 +538,7 @@@ static int newseg(struct ipc_namespace 
                if  ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
 -              file = shmem_file_setup(name, size, acctflag);
 +              file = shmem_file_setup(name, size, acctflag, 0);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
@@@ -1041,8 -1041,8 +1041,8 @@@ out_unlock1
   * "raddr" thing points to kernel space, and there has to be a wrapper around
   * this.
   */
- long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
-             unsigned long shmlba)
+ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
+             ulong *raddr, unsigned long shmlba)
  {
        struct shmid_kernel *shp;
        unsigned long addr;
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (shmlba - 1)) {
-                       if (shmflg & SHM_RND)
-                               addr &= ~(shmlba - 1);     /* round down */
+                       /*
+                        * Round down to the nearest multiple of shmlba.
+                        * For sane do_mmap_pgoff() parameters, avoid
+                        * round downs that trigger nil-page and MAP_FIXED.
+                        */
+                       if ((shmflg & SHM_RND) && addr >= shmlba)
+                               addr &= ~(shmlba - 1);
                        else
  #ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
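
The `addr >= shmlba` guard closes the case where SHM_RND rounded a small, misaligned attach address down to zero, which do_mmap_pgoff() would then treat as a MAP_FIXED mapping at the nil page. Worked example, assuming SHMLBA is a power of two as on the affected architectures: with shmlba = 0x4000, an attach at 0x2100 previously became `0x2100 & ~0x3fff == 0`; it now falls through to the alignment check and fails instead. A userspace-style model of the new rounding rule (the helper name is illustrative only):

        unsigned long round_shm_addr(unsigned long addr, unsigned long shmlba)
        {
                if ((addr & (shmlba - 1)) && addr >= shmlba)
                        addr &= ~(shmlba - 1);  /* round down, never to zero */
                return addr;    /* still-misaligned values are rejected
                                 * by the caller's later checks */
        }
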
diff --combined kernel/futex.c
index fea18acbeae194d5012ae79b917149e845d266eb,3f7dd29f4d4d69b02b0b52b8af4689918e876490..ac8871ec9abb7b8f453eb5aedb2e88ef6b56b473
@@@ -62,7 -62,6 +62,7 @@@
  #include <linux/ptrace.h>
  #include <linux/sched/rt.h>
  #include <linux/hugetlb.h>
 +#include <linux/freezer.h>
  
  #include <asm/futex.h>
  
@@@ -1936,7 -1935,7 +1936,7 @@@ static void futex_wait_queue_me(struct 
                 * is no timeout, or if it has yet to expire.
                 */
                if (!timeout || timeout->task)
 -                      schedule();
 +                      freezable_schedule();
        }
        __set_current_state(TASK_RUNNING);
  }
@@@ -2414,7 -2413,6 +2414,6 @@@ static int futex_wait_requeue_pi(u32 __
  {
        struct hrtimer_sleeper timeout, *to = NULL;
        struct rt_mutex_waiter rt_waiter;
-       struct rt_mutex *pi_mutex = NULL;
        struct futex_hash_bucket *hb;
        union futex_key key2 = FUTEX_KEY_INIT;
        struct futex_q q = futex_q_init;
                if (q.pi_state && (q.pi_state->owner != current)) {
                        spin_lock(q.lock_ptr);
                        ret = fixup_pi_state_owner(uaddr2, &q, current);
+                       if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+                               rt_mutex_unlock(&q.pi_state->pi_mutex);
                        /*
                         * Drop the reference to the pi state which
                         * the requeue_pi() code acquired for us.
                        spin_unlock(q.lock_ptr);
                }
        } else {
+               struct rt_mutex *pi_mutex;
                /*
                 * We have been woken up by futex_unlock_pi(), a timeout, or a
                 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
                if (res)
                        ret = (res < 0) ? res : 0;
  
+               /*
+                * If fixup_pi_state_owner() faulted and was unable to handle
+                * the fault, unlock the rt_mutex and return the fault to
+                * userspace.
+                */
+               if (ret && rt_mutex_owner(pi_mutex) == current)
+                       rt_mutex_unlock(pi_mutex);
                /* Unqueue and drop the lock. */
                unqueue_me_pi(&q);
        }
  
-       /*
-        * If fixup_pi_state_owner() faulted and was unable to handle the
-        * fault, unlock the rt_mutex and return the fault to userspace.
-        */
-       if (ret == -EFAULT) {
-               if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
-                       rt_mutex_unlock(pi_mutex);
-       } else if (ret == -EINTR) {
+       if (ret == -EINTR) {
                /*
                 * We've already been requeued, but cannot restart by calling
                 * futex_lock_pi() directly. We could restart this syscall, but
@@@ -2903,4 -2906,4 +2907,4 @@@ static int __init futex_init(void
  
        return 0;
  }
__initcall(futex_init);
core_initcall(futex_init);
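
Both hunks enforce the same invariant: only the task that owns an rt_mutex may unlock it, so after a failed fixup_pi_state_owner() the lock is released only when current actually holds it, and the unlock now happens before unqueue_me_pi() drops the pi_state reference. Moving `pi_mutex` into the else branch also keeps it from being read on the requeue path, where it was never assigned. The guarded-unlock pattern, in sketch form:

        /* fixup failed: hand the rt_mutex back before reporting the
         * error, but only if we really acquired it */
        if (ret && rt_mutex_owner(pi_mutex) == current)
                rt_mutex_unlock(pi_mutex);

The switch from __initcall() to core_initcall() makes futex_init() run earlier in boot, so the futex hash and cmpxchg detection are ready before any early users issue futex operations.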
diff --combined kernel/printk.c
index f23cbc5e9ea31f2fae57235e5c6827c812fba296,8acc98aafa6ea189fac01726d3e819703ce743e6..e20087156cdb23172a5b4de466544f5b8901c48d
@@@ -16,7 -16,6 +16,7 @@@
   *    01Mar01 Andrew Morton
   */
  
 +
  #include <linux/kernel.h>
  #include <linux/mm.h>
  #include <linux/tty.h>
@@@ -35,7 -34,6 +35,7 @@@
  #include <linux/memblock.h>
  #include <linux/aio.h>
  #include <linux/syscalls.h>
 +#include <linux/suspend.h>
  #include <linux/kexec.h>
  #include <linux/kdb.h>
  #include <linux/ratelimit.h>
  #include <linux/poll.h>
  #include <linux/irq_work.h>
  #include <linux/utsname.h>
 +#include <linux/mt_sched_mon.h>
 +#include <linux/aee.h>
  
  #include <asm/uaccess.h>
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/printk.h>
  
 +/* Some options {*/
 +#define LOG_TOO_MUCH_WARNING
 +#ifdef LOG_TOO_MUCH_WARNING
 +static int log_in_resume;
 +#endif
 +/* Some options }*/
 +#ifdef CONFIG_EARLY_PRINTK_DIRECT
 +extern void printascii(char *);
 +#endif
 +
 +bool printk_disable_uart = 0;
 +static DEFINE_PER_CPU(char, printk_state);
  /* printk's without a loglevel use this.. */
  #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
  
  /* We show everything that is MORE important than this.. */
  #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
 -#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
 +#define DEFAULT_CONSOLE_LOGLEVEL 6 /* anything MORE serious than KERN_INFO */
  
  int console_printk[4] = {
        DEFAULT_CONSOLE_LOGLEVEL,       /* console_loglevel */
@@@ -80,7 -64,6 +80,7 @@@
        MINIMUM_CONSOLE_LOGLEVEL,       /* minimum_console_loglevel */
        DEFAULT_CONSOLE_LOGLEVEL,       /* default_console_loglevel */
  };
 +EXPORT_SYMBOL_GPL(console_printk);
  
  /*
   * Low level drivers may need that to know if they can schedule in
@@@ -242,12 -225,12 +242,12 @@@ static enum log_flags syslog_prev
  static size_t syslog_partial;
  
  /* index and sequence number of the first record stored in the buffer */
 -static u64 log_first_seq;
 -static u32 log_first_idx;
 +/*static*/ u64 log_first_seq;
 +/*static*/ u32 log_first_idx;
  
  /* index and sequence number of the next record to store in the buffer */
 -static u64 log_next_seq;
 -static u32 log_next_idx;
 +/*static*/ u64 log_next_seq;
 +/*static*/ u32 log_next_idx;
  
  /* the next printk record to write to the console */
  static u64 console_seq;
@@@ -272,31 -255,6 +272,31 @@@ static char __log_buf[__LOG_BUF_LEN] __
  static char *log_buf = __log_buf;
  static u32 log_buf_len = __LOG_BUF_LEN;
  
 +#ifdef CONFIG_MT_PRINTK_UART_CONSOLE
 +
 +extern int mt_need_uart_console;
 +inline void mt_disable_uart(void)

 +{
 +    if (mt_need_uart_console == 0) {
 +        printk("<< printk console disable >>\n");
 +        printk_disable_uart = 1;
 +    } else {
 +        printk("<< printk console can't be disabled >>\n");
 +    }
 +}
 +inline void mt_enable_uart(void)
 +{
 +    if (mt_need_uart_console == 1) {
 +        if (printk_disable_uart == 0)
 +            return;
 +        printk_disable_uart = 0;
 +        printk("<< printk console enable >>\n");
 +    } else {
 +        printk("<< printk console can't be enabled >>\n");
 +    }
 +}
 +
 +#endif
  /* cpu currently holding logbuf_lock */
  static volatile unsigned int logbuf_cpu = UINT_MAX;
  
@@@ -352,24 -310,9 +352,24 @@@ static void log_store(int facility, in
  {
        struct log *msg;
        u32 size, pad_len;
 -
 +    int this_cpu = smp_processor_id();
 +    char state = __raw_get_cpu_var(printk_state);
 +    char tbuf[50];
 +    unsigned tlen;
 +    if (state == 0) {
 +        __raw_get_cpu_var(printk_state) = ' ';
 +        state = ' ';
 +    }
 +    /* printk prefix { */
 +    if (console_suspended == 0) {
 +        tlen = snprintf(tbuf, sizeof(tbuf), "%c(%x)[%d:%s]",
 +                state, this_cpu, current->pid, current->comm);
 +    } else {
 +        tlen = snprintf(tbuf, sizeof(tbuf), "%c%x)", state, this_cpu);
 +    }
 +    /* printk prefix } */
        /* number of '\0' padding bytes to next message */
 -      size = sizeof(struct log) + text_len + dict_len;
 +      size = sizeof(struct log) + text_len + tlen + dict_len;
        pad_len = (-size) & (LOG_ALIGN - 1);
        size += pad_len;
  
  
        /* fill message */
        msg = (struct log *)(log_buf + log_next_idx);
 -      memcpy(log_text(msg), text, text_len);
 +      /* the per-CPU prefix goes in first, then the original text */
 +      memcpy(log_text(msg), tbuf, tlen);
 +      memcpy(log_text(msg) + tlen, text, text_len);
 +      text_len += tlen;
        msg->text_len = text_len;
        memcpy(log_dict(msg), dict, dict_len);
        msg->dict_len = dict_len;
@@@ -927,7 -867,6 +927,7 @@@ static bool printk_time = 1
  static bool printk_time;
  #endif
  module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 +module_param_named(disable_uart, printk_disable_uart, bool, S_IRUGO | S_IWUSR);
  
  static size_t print_time(u64 ts, char *buf)
  {
        rem_nsec = do_div(ts, 1000000000);
  
        if (!buf)
 -              return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
 +              return snprintf(NULL, 0, "[%5lu.000000]", (unsigned long)ts);
  
 -      return sprintf(buf, "[%5lu.%06lu] ",
 +      return sprintf(buf, "[%5lu.%06lu]",
                       (unsigned long)ts, rem_nsec / 1000);
  }
  
@@@ -1331,7 -1270,7 +1331,7 @@@ static void call_console_drivers(int le
  {
        struct console *con;
  
-       trace_console(text, len);
+       trace_console_rcuidle(text, len);
  
        if (level >= console_loglevel && !ignore_loglevel)
                return;
                return;
  
        for_each_console(con) {
 +        if (printk_disable_uart && (con->flags & CON_CONSDEV))
 +            continue;
                if (exclusive_console && con != exclusive_console)
                        continue;
                if (!(con->flags & CON_ENABLED))
@@@ -1568,11 -1505,7 +1568,11 @@@ asmlinkage int vprintk_emit(int facilit
        unsigned long flags;
        int this_cpu;
        int printed_len = 0;
 -
 +    int in_irq_disable, in_non_preempt;
 +    in_irq_disable = irqs_disabled();
 +    in_non_preempt = in_atomic();
 +      /* pre-format for printascii(): zero the buffer, then format */
 +      memset(text, 0x0, sizeof(textbuf));
 +      vscnprintf(text, sizeof(textbuf), fmt, args);
        boot_delay_msec(level);
        printk_delay();
  
                }
        }
  
 +#ifdef CONFIG_EARLY_PRINTK_DIRECT
 +      printascii(text);
 +#endif
 +
        if (level == -1)
                level = default_message_loglevel;
  
        if (dict)
                lflags |= LOG_PREFIX|LOG_NEWLINE;
 -
 +        
 +#ifdef CONFIG_PRINTK_PROCESS_INFO
 +    if (in_irq_disable)
 +        __raw_get_cpu_var(printk_state) = '-';
 +#ifdef CONFIG_MT_PRINTK_UART_CONSOLE
 +    else if (printk_disable_uart == 0)
 +        __raw_get_cpu_var(printk_state) = '.';
 +#endif
 +    else
 +        __raw_get_cpu_var(printk_state) = ' ';
 +#endif
 +      
        if (!(lflags & LOG_NEWLINE)) {
                /*
                 * Flush the conflicting buffer. An earlier newline was missing,
@@@ -1981,28 -1899,16 +1981,28 @@@ void suspend_console(void
        console_lock();
        console_suspended = 1;
        up(&console_sem);
 +      mutex_release(&console_lock_dep_map, 1, _RET_IP_);
  }
 +EXPORT_SYMBOL_GPL(suspend_console);
  
  void resume_console(void)
  {
        if (!console_suspend_enabled)
                return;
        down(&console_sem);
 +      mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
        console_suspended = 0;
 -      console_unlock();
 +#ifdef LOG_TOO_MUCH_WARNING
 +//    __raw_get_cpu_var(MT_trace_in_resume_console) = 1;
 +//    log_in_resume = 1;
 +    console_unlock();
 +//    log_in_resume = 0;
 +//    __raw_get_cpu_var(MT_trace_in_resume_console) = 0;
 +#else
 +    console_unlock();
 +#endif
  }
 +EXPORT_SYMBOL_GPL(resume_console);
  
  /**
   * console_cpu_notify - print deferred console messages after CPU hotplug
   *
   * console_unlock(); may be called from any context.
   */
 +#ifdef LOG_TOO_MUCH_WARNING
 +static int console_log_max = 400000;
 +static int already_skip_log;
 +#endif
  void console_unlock(void)
  {
        static char text[LOG_LINE_MAX + PREFIX_MAX];
        unsigned long flags;
        bool wake_klogd = false;
        bool do_cond_resched, retry;
 +#ifdef LOG_TOO_MUCH_WARNING
 +    unsigned long total_log_size = 0;
 +    unsigned long long t1 = 0, t2 = 0;
 +    char aee_str[512];
 +    int org_loglevel = console_loglevel;
 +#endif
 +
  
        if (console_suspended) {
                up(&console_sem);
@@@ -2167,12 -2062,6 +2167,12 @@@ again
                int level;
  
                raw_spin_lock_irqsave(&logbuf_lock, flags);
 +#ifdef LOG_TOO_MUCH_WARNING /*For Resume log too much*/
 +        if (log_in_resume) {
 +            t1 = sched_clock();
 +        }
 +#endif
 +
                if (seen_seq != log_next_seq) {
                        wake_klogd = true;
                        seen_seq = log_next_seq;
@@@ -2215,41 -2104,8 +2215,41 @@@ skip
                raw_spin_unlock(&logbuf_lock);
  
                stop_critical_timings();        /* don't trace print latency */
 -              call_console_drivers(level, text, len);
 -              start_critical_timings();
 +#ifdef LOG_TOO_MUCH_WARNING
 +        /*
 +         * A UART console writes roughly one character per 10us, so the
 +         * 400,000-character cap below corresponds to about 4 seconds of
 +         * console time.
 +         */
 +        if (log_in_resume) {
 +            org_loglevel = console_loglevel;
 +            console_loglevel = 4;
 +        }
 +        total_log_size += len;
 +        if (total_log_size < console_log_max)
 +                  call_console_drivers(level, text, len);
 +        else if (!already_skip_log) {
 +            sprintf(aee_str, "PRINTK too much:%lu", total_log_size);
 +            aee_kernel_warning(aee_str, "Need to shrink kernel log");
 +            already_skip_log = 1;
 +        }
 +        /**/
 +        start_critical_timings();
 +        /* For Resume log too much*/
 +        if (log_in_resume) {
 +            t2 = sched_clock();
 +            console_loglevel = org_loglevel;
 +            if (t2 - t1 > 100000000) {
 +                sprintf( aee_str,"[RESUME CONSOLE too long:%lluns>100ms] s:%lluns, e:%lluns\n", t2 - t1, t1, t2);
 +                aee_kernel_warning(aee_str, "Need to shrink kernel log");
 +            }
 +        }
 +
 +        /**/
 +#else
 +        start_critical_timings();
 +        call_console_drivers(level, text, len);
 +#endif
                local_irq_restore(flags);
  
                if (do_cond_resched)
@@@ -2635,7 -2491,6 +2635,7 @@@ late_initcall(printk_late_init)
  
  static DEFINE_PER_CPU(int, printk_pending);
  static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
 +static DEFINE_PER_CPU(int, printk_sched_length);
  
  static void wake_up_klogd_work_func(struct irq_work *irq_work)
  {
  
        if (pending & PRINTK_PENDING_SCHED) {
                char *buf = __get_cpu_var(printk_sched_buf);
 -              printk(KERN_WARNING "[sched_delayed] %s", buf);
 +              printk(KERN_WARNING "[printk_delayed:start]\n");
 +              printk(KERN_WARNING "%s", buf);
 +              printk(KERN_WARNING "[printk_delayed:done]\n");
 +    __get_cpu_var(printk_sched_length) = 0;
        }
  
        if (pending & PRINTK_PENDING_WAKEUP)
@@@ -2674,19 -2526,12 +2674,19 @@@ int printk_deferred(const char *fmt, ..
        va_list args;
        char *buf;
        int r;
 -
 +    int buf_length;
        local_irq_save(flags);
        buf = __get_cpu_var(printk_sched_buf);
 +    buf_length = __get_cpu_var(printk_sched_length);
  
        va_start(args, fmt);
 -      r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
 +    if (PRINTK_BUF_SIZE >= buf_length) {
 +        r = vsnprintf(buf + buf_length, PRINTK_BUF_SIZE - buf_length, fmt, args);
 +        __get_cpu_var(printk_sched_length) += r;
 +    } else {
 +        printk("delayed log buf overflow, size:%d\n", buf_length);
 +        r = 0;
 +    }
        va_end(args);
  
        __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
@@@ -3111,10 -2956,4 +3111,10 @@@ void show_regs_print_info(const char *l
               task_thread_info(current));
  }
  
 +void get_kernel_log_buffer(unsigned long *addr, unsigned long *size, unsigned long *start)
 +{
 +      *addr = (unsigned long)log_buf;
 +      *size = log_buf_len;
 +      *start = (unsigned long)&log_first_idx;
 +}
  #endif
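
get_kernel_log_buffer() hands out the raw log buffer address, its size, and the address of log_first_idx, which is why those bookkeeping variables lost their `static` qualifier above: an external dump agent can walk the records after a crash. A hypothetical consumer, for illustration only:

        /* e.g. a ramdump/AEE-style driver registering the log area */
        unsigned long buf, size, first_idx;

        get_kernel_log_buffer(&buf, &size, &first_idx);
        pr_info("log_buf at %#lx, %lu bytes, first_idx tracked at %#lx\n",
                buf, size, first_idx);
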
diff --combined kernel/sched/core.c
index b949db611be4f43cd8d8fec02c42d03bafa0d6e8,0892cfa0fe343666f9c8cb6594d453139afb7363..3c359e0c83808e9a7802b1e7f6d25c8fd943ed14
  #include "../workqueue_internal.h"
  #include "../smpboot.h"
  
 +#ifdef CONFIG_MT65XX_TRACER
 +#include "mach/mt_mon.h"
 +#include "linux/aee.h"
 +#endif
 +
 +#include <linux/mt_sched_mon.h>
  #define CREATE_TRACE_POINTS
  #include <trace/events/sched.h>
  
 +#include <mtlbprof/mtlbprof.h>
 +#include <mtlbprof/mtlbprof_stat.h>
 +
 +#ifdef CONFIG_MT_PRIO_TRACER
 +# include <linux/prio_tracer.h>
 +#endif
 +
  void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
  {
        unsigned long delta;
@@@ -773,99 -760,11 +773,99 @@@ static void set_load_weight(struct task
        load->inv_weight = prio_to_wmult[prio];
  }
  
 +#ifdef CONFIG_MTK_SCHED_CMP_TGS
 +static void sched_tg_enqueue(struct rq *rq, struct task_struct *p)
 +{
 +      int id;
 +      unsigned long flags;
 +      struct task_struct *tg = p->group_leader;
 +
 +      if(group_leader_is_empty(p))
 +              return;
 +      id = get_cluster_id(rq->cpu);
 +      if (unlikely(WARN_ON(id < 0)))
 +              return;
 +
 +      raw_spin_lock_irqsave(&tg->thread_group_info_lock, flags);
 +      tg->thread_group_info[id].nr_running++;
 +      raw_spin_unlock_irqrestore(&tg->thread_group_info_lock, flags);
 +
 +#if 0 
 +      mt_sched_printf("enqueue %d:%s %d:%s %d %lu %lu %lu, %lu %lu %lu",
 +                      tg->pid, tg->comm, p->pid, p->comm, id, rq->cpu,
 +                      tg->thread_group_info[0].nr_running,
 +                      tg->thread_group_info[0].cfs_nr_running,
 +                      tg->thread_group_info[0].load_avg_ratio,
 +                      tg->thread_group_info[1].nr_running,
 +                      tg->thread_group_info[1].cfs_nr_running,
 +                      tg->thread_group_info[1].load_avg_ratio);
 +#endif
 +      //tgs_log(rq, p);
 +}
 +
 +static void sched_tg_dequeue(struct rq *rq, struct task_struct *p)
 +{
 +      int id;
 +      unsigned long flags;
 +      struct task_struct *tg = p->group_leader;
 +
 +      if(group_leader_is_empty(p))
 +              return;
 +      id = get_cluster_id(rq->cpu);
 +      if (unlikely(WARN_ON(id < 0)))
 +              return;
 +
 +      raw_spin_lock_irqsave(&tg->thread_group_info_lock, flags);
 +      //WARN_ON(!tg->thread_group_info[id].nr_running);
 +      tg->thread_group_info[id].nr_running--;
 +      raw_spin_unlock_irqrestore(&tg->thread_group_info_lock, flags);
 +
 +#if 0 
 +      mt_sched_printf("dequeue %d:%s %d:%s %d %d %lu %lu %lu, %lu %lu %lu",
 +                      tg->pid, tg->comm, p->pid, p->comm, id, rq->cpu,
 +                      tg->thread_group_info[0].nr_running,
 +                      tg->thread_group_info[0].cfs_nr_running,
 +                      tg->thread_group_info[0].load_avg_ratio,
 +                      tg->thread_group_info[1].nr_running,
 +                      tg->thread_group_info[1].cfs_nr_running,
 +                      tg->thread_group_info[1].load_avg_ratio);
 +#endif
 +      //tgs_log(rq, p);
 +}
 +
 +#endif
 +
 +#ifdef CONFIG_MTK_SCHED_CMP_TGS
 +static void tgs_log(struct rq *rq, struct task_struct *p)
 +{
 +#ifdef CONFIG_MT_SCHED_INFO
 +      struct task_struct *tg = p->group_leader;
 +
 +      if(group_leader_is_empty(p))
 +              return;
 +
 +      //  if(!strncmp(tg->comm,"sched_test", 10)){
 +                mt_sched_printf("%d:%s %d:%s %lu %lu %lu, %lu %lu %lu", tg->pid, tg->comm, p->pid, p->comm,
 +                      tg->thread_group_info[0].nr_running,
 +                      tg->thread_group_info[0].cfs_nr_running,
 +                      tg->thread_group_info[0].load_avg_ratio,
 +                      tg->thread_group_info[1].nr_running,
 +                      tg->thread_group_info[1].cfs_nr_running,
 +                      tg->thread_group_info[1].load_avg_ratio);
 +       // }
 +#endif
 +}
 +#endif
 +
  static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
  {
        update_rq_clock(rq);
        sched_info_queued(p);
        p->sched_class->enqueue_task(rq, p, flags);
 +#ifdef CONFIG_MTK_SCHED_CMP_TGS
 +      sched_tg_enqueue(rq, p);
 +      tgs_log(rq, p); 
 +#endif
  }
  
  static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
        update_rq_clock(rq);
        sched_info_dequeued(p);
        p->sched_class->dequeue_task(rq, p, flags);
 +#ifdef CONFIG_MTK_SCHED_CMP_TGS
 +      sched_tg_dequeue(rq, p);
 +      tgs_log(rq, p); 
 +#endif
  }
  
  void activate_task(struct rq *rq, struct task_struct *p, int flags)
                rq->nr_uninterruptible--;
  
        enqueue_task(rq, p, flags);
 +      
 +#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
 +      if( 2 <=  rq->nr_running){
 +              if (1 == cpumask_weight(&p->cpus_allowed))
 +                      mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_AFFINITY_STATE);
 +              else
 +                      mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_N_TASK_STATE);
 +      }else if ( (1 == rq->nr_running)){
 +              mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_ONE_TASK_STATE);
 +      }
 +#endif
  }
  
  void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
                rq->nr_uninterruptible++;
  
        dequeue_task(rq, p, flags);
 +      
 +#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
 +      if ( 1 == rq->nr_running )
 +              mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_ONE_TASK_STATE);
 +      else if (0 == rq->nr_running)
 +              mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_NO_TASK_STATE);
 +#endif
  }
  
  static void update_rq_clock_task(struct rq *rq, s64 delta)
  
  void sched_set_stop_task(int cpu, struct task_struct *stop)
  {
 -      struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 +      //struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 +      struct sched_param param = { .sched_priority = RTPM_PRIO_CPU_CALLBACK };
        struct task_struct *old_stop = cpu_rq(cpu)->stop;
  
        if (stop) {
@@@ -1524,21 -1400,13 +1524,21 @@@ static void sched_ttwu_pending(void
  
        raw_spin_unlock(&rq->lock);
  }
 -
 +enum ipi_msg_type {
 +      IPI_RESCHEDULE,
 +      IPI_CALL_FUNC,
 +      IPI_CALL_FUNC_SINGLE,
 +      IPI_CPU_STOP,
 +};
  void scheduler_ipi(void)
  {
        if (llist_empty(&this_rq()->wake_list)
                        && !tick_nohz_full_cpu(smp_processor_id())
 -                      && !got_nohz_idle_kick())
 +                      && !got_nohz_idle_kick()){
 +        mt_trace_ISR_start(IPI_RESCHEDULE);
 +        mt_trace_ISR_end(IPI_RESCHEDULE);
                return;
 +    }
  
        /*
         * Not all reschedule IPI handlers call irq_enter/irq_exit, since
         * somewhat pessimize the simple resched case.
         */
        irq_enter();
 +    mt_trace_ISR_start(IPI_RESCHEDULE);
        tick_nohz_full_check();
        sched_ttwu_pending();
  
                this_rq()->idle_balance = 1;
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
 +    mt_trace_ISR_end(IPI_RESCHEDULE);
        irq_exit();
  }
  
@@@ -1697,14 -1563,7 +1697,14 @@@ try_to_wake_up(struct task_struct *p, u
  
        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
        if (task_cpu(p) != cpu) {
 +#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER                
 +              char strings[128]="";
 +#endif
                wake_flags |= WF_MIGRATED;
 +#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
 +              snprintf(strings, 128, "%d:%d:%s:wakeup:%d:%d:%s", task_cpu(current), current->pid, current->comm, cpu, p->pid, p->comm);
 +              trace_sched_lbprof_log(strings);
 +#endif                
                set_task_cpu(p, cpu);
        }
  #endif /* CONFIG_SMP */
@@@ -1802,20 -1661,6 +1802,20 @@@ static void __sched_fork(struct task_st
  #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
        p->se.avg.runnable_avg_period = 0;
        p->se.avg.runnable_avg_sum = 0;
 +#ifdef CONFIG_SCHED_HMP
 +      /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
 +#define LOAD_AVG_MAX 47742
 +      if (p->mm) {
 +              p->se.avg.hmp_last_up_migration = 0;
 +              p->se.avg.hmp_last_down_migration = 0;
 +              p->se.avg.load_avg_ratio = 1023;
 +              p->se.avg.load_avg_contrib =
 +                              (1023 * scale_load_down(p->se.load.weight));
 +              p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
 +              p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
 +              p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
 +      }
 +#endif
  #endif
  #ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@@ -1960,8 -1805,6 +1960,8 @@@ void wake_up_new_task(struct task_struc
        set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
  #endif
  
 +      /* Initialize new task's runnable average */
 +      init_task_runnable_average(p);
        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
        p->on_rq = 1;
@@@ -2184,10 -2027,6 +2184,10 @@@ context_switch(struct rq *rq, struct ta
  
        prepare_task_switch(rq, prev, next);
  
 +#ifdef CONFIG_MT65XX_TRACER
 +    if(get_mt65xx_mon_mode() == MODE_SCHED_SWITCH)
 +          trace_mt65xx_mon_sched_switch(prev, next);
 +#endif
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
@@@ -2280,12 -2119,6 +2280,12 @@@ unsigned long this_cpu_load(void
        return this->cpu_load[0];
  }
  
 +unsigned long get_cpu_load(int cpu)                                                                     
 +{   
 +    struct rq *this = cpu_rq(cpu);                                                                      
 +    return this->cpu_load[0];                                                                           
 +}
 +EXPORT_SYMBOL(get_cpu_load);
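
get_cpu_load() simply exposes cpu_load[0] of a remote run-queue to modules. A hypothetical module-side use:

        /* sample the instantaneous load figure of every online CPU */
        int cpu;

        for_each_online_cpu(cpu)
                pr_info("cpu%d load: %lu\n", cpu, get_cpu_load(cpu));
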
  
  /*
   * Global load-average calculations
@@@ -2773,19 -2606,6 +2773,19 @@@ static void __update_cpu_load(struct r
        sched_avg_update(this_rq);
  }
  
 +# ifdef CONFIG_SMP
 +/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
 +static inline unsigned long get_rq_runnable_load(struct rq *rq)
 +{
 +      return rq->cfs.runnable_load_avg;
 +}
 +# else
 +static inline unsigned long get_rq_runnable_load(struct rq *rq)
 +{
 +      return rq->load.weight;
 +}
 +# endif
 +
  #ifdef CONFIG_NO_HZ_COMMON
  /*
   * There is no sane way to deal with nohz on smp when using jiffies because the
   * Called from nohz_idle_balance() to update the load ratings before doing the
   * idle balance.
   */
 +/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
  void update_idle_cpu_load(struct rq *this_rq)
  {
        unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
  /*
   * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
   */
 +/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
  void update_cpu_load_nohz(void)
  {
        struct rq *this_rq = this_rq();
  /*
   * Called from scheduler_tick()
   */
 +/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
  static void update_cpu_load_active(struct rq *this_rq)
  {
 +      unsigned long load = get_rq_runnable_load(this_rq);
        /*
         * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
         */
        this_rq->last_load_update_tick = jiffies;
 -      __update_cpu_load(this_rq, this_rq->load.weight, 1);
 +      __update_cpu_load(this_rq, load, 1);
  
        calc_load_account_active(this_rq);
  }
@@@ -2967,18 -2783,12 +2967,18 @@@ void scheduler_tick(void
  
        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
 -      update_cpu_load_active(rq);
        curr->sched_class->task_tick(rq, curr, 0);
 +      update_cpu_load_active(rq);
 +#ifdef CONFIG_MT_RT_SCHED
 +      mt_check_rt_policy(rq);
 +#endif
        raw_spin_unlock(&rq->lock);
  
        perf_event_task_tick();
 -
 +#ifdef CONFIG_MT_SCHED_MONITOR
 +    if(smp_processor_id() == 0) //only record by CPU#0
 +        mt_save_irq_counts();
 +#endif
  #ifdef CONFIG_SMP
        rq->idle_balance = idle_cpu(cpu);
        trigger_load_balance(rq, cpu);
@@@ -3042,19 -2852,8 +3042,19 @@@ void __kprobes add_preempt_count(int va
        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
                                PREEMPT_MASK - 10);
  #endif
 -      if (preempt_count() == val)
 -              trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 +    //if (preempt_count() == val)
 +    //  trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 +    if (preempt_count() == (val & ~PREEMPT_ACTIVE)){
 +#ifdef CONFIG_PREEMPT_TRACER
 +        trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 +#endif
 +#ifdef CONFIG_PREEMPT_MONITOR
 +        if(unlikely(__raw_get_cpu_var(mtsched_mon_enabled) & 0x1)){
 +            //current->t_add_prmpt = sched_clock();
 +            MT_trace_preempt_off();
 +        }
 +#endif
 +    }
  }
  EXPORT_SYMBOL(add_preempt_count);
  
@@@ -3074,18 -2873,8 +3074,18 @@@ void __kprobes sub_preempt_count(int va
                return;
  #endif
  
 -      if (preempt_count() == val)
 -              trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 +    //if (preempt_count() == val)
 +    //  trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 +    if (preempt_count() == (val & ~PREEMPT_ACTIVE)){
 +#ifdef CONFIG_PREEMPT_TRACER
 +        trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 +#endif
 +#ifdef CONFIG_PREEMPT_MONITOR
 +        if(unlikely(__raw_get_cpu_var(mtsched_mon_enabled) & 0x1)){
 +            MT_trace_preempt_on();
 +        }    
 +#endif
 +    }
        preempt_count() -= val;
  }
  EXPORT_SYMBOL(sub_preempt_count);
@@@ -3109,7 -2898,6 +3109,7 @@@ static noinline void __schedule_bug(str
                print_irqtrace_events(prev);
        dump_stack();
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 +      BUG_ON(1);
  }
  
  /*
@@@ -3221,9 -3009,6 +3221,9 @@@ need_resched
  
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 +#ifdef CONFIG_MT_SCHED_MONITOR
 +    __raw_get_cpu_var(MT_trace_in_sched) = 1;
 +#endif 
  
        /*
         * Make sure that signal_pending_state()->signal_pending() below
        } else
                raw_spin_unlock_irq(&rq->lock);
  
 +#ifdef CONFIG_MT_RT_SCHED
 +      mt_post_schedule(rq);
 +#endif        
 +#ifdef CONFIG_MT_SCHED_MONITOR
 +    __raw_get_cpu_var(MT_trace_in_sched) = 0;
 +#endif
        post_schedule(rq);
  
        sched_preempt_enable_no_resched();
@@@ -3918,61 -3697,6 +3918,61 @@@ out_unlock
        __task_rq_unlock(rq);
  }
  #endif
 +
 +#ifdef CONFIG_MT_PRIO_TRACER
 +void set_user_nice_core(struct task_struct *p, long nice)
 +{
 +      int old_prio, delta, on_rq;
 +      unsigned long flags;
 +      struct rq *rq;
 +
 +      if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
 +              return;
 +      /*
 +       * We have to be careful, if called from sys_setpriority(),
 +       * the task might be in the middle of scheduling on another CPU.
 +       */
 +      rq = task_rq_lock(p, &flags);
 +      /*
 +       * The RT priorities are set via sched_setscheduler(), but we still
 +       * allow the 'normal' nice value to be set - but as expected
 +       * it wont have any effect on scheduling until the task is
 +       * SCHED_FIFO/SCHED_RR:
 +       */
 +      if (task_has_rt_policy(p)) {
 +              p->static_prio = NICE_TO_PRIO(nice);
 +              goto out_unlock;
 +      }
 +      on_rq = p->on_rq;
 +      if (on_rq)
 +              dequeue_task(rq, p, 0);
 +
 +      p->static_prio = NICE_TO_PRIO(nice);
 +      set_load_weight(p);
 +      old_prio = p->prio;
 +      p->prio = effective_prio(p);
 +      delta = p->prio - old_prio;
 +
 +      if (on_rq) {
 +              enqueue_task(rq, p, 0);
 +              /*
 +               * If the task increased its priority or is running and
 +               * lowered its priority, then reschedule its CPU:
 +               */
 +              if (delta < 0 || (delta > 0 && task_running(rq, p)))
 +                      resched_task(rq->curr);
 +      }
 +out_unlock:
 +      task_rq_unlock(rq, p, &flags);
 +}
 +
 +void set_user_nice(struct task_struct *p, long nice)
 +{
 +      set_user_nice_core(p, nice);
 +      /* setting nice implies to set a normal sched policy */
 +      update_prio_tracer(task_pid_nr(p), NICE_TO_PRIO(nice), 0, PTS_KRNL);
 +}
 +#else /* !CONFIG_MT_PRIO_TRACER */
  void set_user_nice(struct task_struct *p, long nice)
  {
        int old_prio, delta, on_rq;
  out_unlock:
        task_rq_unlock(rq, p, &flags);
  }
 +#endif
  EXPORT_SYMBOL(set_user_nice);
  
  /*
@@@ -4070,11 -3793,8 +4070,11 @@@ SYSCALL_DEFINE1(nice, int, increment
        retval = security_task_setnice(current, nice);
        if (retval)
                return retval;
 -
 +#ifdef CONFIG_MT_PRIO_TRACER
 +      set_user_nice_syscall(current, nice);
 +#else
        set_user_nice(current, nice);
 +#endif
        return 0;
  }
  
@@@ -4143,8 -3863,6 +4143,8 @@@ static struct task_struct *find_process
        return pid ? find_task_by_vpid(pid) : current;
  }
  
 +extern struct cpumask hmp_slow_cpu_mask;
 +
  /* Actually do priority change: must hold rq lock. */
  static void
  __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
        p->normal_prio = normal_prio(p);
        /* we are holding p->pi_lock already */
        p->prio = rt_mutex_getprio(p);
 -      if (rt_prio(p->prio))
 +      if (rt_prio(p->prio)) {
                p->sched_class = &rt_sched_class;
 +      }
        else
                p->sched_class = &fair_sched_class;
        set_load_weight(p);
@@@ -4178,21 -3895,6 +4178,21 @@@ static bool check_same_owner(struct tas
        return match;
  }
  
 +static int check_mt_allow_rt(struct sched_param *param)
 +{
 +      int allow = 0;
 +      if(0 == MT_ALLOW_RT_PRIO_BIT){
 +              //this condition check will be removed
 +              return 1;
 +      }
 +
 +      if(param->sched_priority & MT_ALLOW_RT_PRIO_BIT){
 +              param->sched_priority &= ~MT_ALLOW_RT_PRIO_BIT;
 +              allow = 1;
 +      }
 +      return allow;
 +}   
 +
  static int __sched_setscheduler(struct task_struct *p, int policy,
                                const struct sched_param *param, bool user)
  {
@@@ -4219,13 -3921,6 +4219,13 @@@ recheck
                        return -EINVAL;
        }
  
 +      if(rt_policy(policy)){                                                                                
 +              if (!check_mt_allow_rt((struct sched_param *)param)){
 +                      printk("[RT_MONITOR]WARNNING [%d:%s] SET NOT ALLOW RT Prio [%d] for proc [%d:%s]\n", current->pid, current->comm, param->sched_priority, p->pid, p->comm);
 +                      //dump_stack();
 +              }
 +      }
 +
        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are
         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
   *
   * NOTE that the task may be already dead.
   */
 +#ifdef CONFIG_MT_PRIO_TRACER
 +int sched_setscheduler_core(struct task_struct *p, int policy,
 +                          const struct sched_param *param)
 +{
 +      return __sched_setscheduler(p, policy, param, true);
 +}
 +
 +int sched_setscheduler(struct task_struct *p, int policy,
 +                     const struct sched_param *param)
 +{
 +      int retval;
 +
 +      retval = sched_setscheduler_core(p, policy, param);
 +      if (!retval) {
 +              int prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;
 +              if (!rt_policy(policy))
 +                      prio = __normal_prio(p);
 +              else
 +                      prio = MAX_RT_PRIO-1 - prio;
 +              update_prio_tracer(task_pid_nr(p), prio, policy, PTS_KRNL);
 +      }
 +      return retval;
 +}
 +#else /* !CONFIG_MT_PRIO_TRACER */
  int sched_setscheduler(struct task_struct *p, int policy,
                       const struct sched_param *param)
  {
        return __sched_setscheduler(p, policy, param, true);
  }
 +#endif
  EXPORT_SYMBOL_GPL(sched_setscheduler);
  
  /**
   * stop_machine(): we create temporary high priority worker threads,
   * but our caller might not have that capability.
   */
 +#ifdef CONFIG_MT_PRIO_TRACER
 +int sched_setscheduler_nocheck_core(struct task_struct *p, int policy,
 +                                  const struct sched_param *param)
 +{
 +      return __sched_setscheduler(p, policy, param, false);
 +}
 +
 +
 +int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 +                             const struct sched_param *param)
 +{
 +        int retval;
 +
 +      retval = sched_setscheduler_nocheck_core(p, policy, param);
 +      if (!retval) {
 +              int prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;
 +              if (!rt_policy(policy))
 +                      prio = __normal_prio(p);
 +              else
 +                      prio = MAX_RT_PRIO-1 - prio;
 +              update_prio_tracer(task_pid_nr(p), prio, policy, PTS_KRNL);
 +      }
 +      return retval;
 +}
 +#else /* !CONFIG_MT_PRIO_TRACER */
  int sched_setscheduler_nocheck(struct task_struct *p, int policy,
                               const struct sched_param *param)
  {
        return __sched_setscheduler(p, policy, param, false);
  }
 +#endif
  
  static int
  do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
        rcu_read_lock();
        retval = -ESRCH;
        p = find_process_by_pid(pid);
 +#ifdef CONFIG_MT_PRIO_TRACER
 +      if (p != NULL)
 +              retval = sched_setscheduler_syscall(p, policy, &lparam);
 +#else
        if (p != NULL)
                retval = sched_setscheduler(p, policy, &lparam);
 +#endif
 +
        rcu_read_unlock();
  
        return retval;
@@@ -4567,7 -4205,6 +4567,7 @@@ long sched_setaffinity(pid_t pid, cons
        if (!p) {
                rcu_read_unlock();
                put_online_cpus();
 +              printk(KERN_DEBUG "SCHED: setaffinity find process %d fail\n", pid); 
                return -ESRCH;
        }
  
  
        if (p->flags & PF_NO_SETAFFINITY) {
                retval = -EINVAL;
 +              printk(KERN_DEBUG "SCHED: setaffinity flags PF_NO_SETAFFINITY fail\n"); 
                goto out_put_task;
        }
        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
                retval = -ENOMEM;
 +              printk(KERN_DEBUG "SCHED: setaffinity allo_cpumask_var for cpus_allowed fail\n"); 
                goto out_put_task;
        }
        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
                retval = -ENOMEM;
 +              printk(KERN_DEBUG "SCHED: setaffinity allo_cpumask_var for new_mask fail\n");  
                goto out_free_cpus_allowed;
        }
        retval = -EPERM;
                rcu_read_lock();
                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
                        rcu_read_unlock();
 +                      printk(KERN_DEBUG "SCHED: setaffinity check_same_owner and task_ns_capable fail\n");  
                        goto out_unlock;
                }
                rcu_read_unlock();
        }
  
        retval = security_task_setscheduler(p);
 -      if (retval)
 +      if (retval){
 +              printk(KERN_DEBUG "SCHED: setaffinity security_task_setscheduler fail, status: %d\n", retval);  
                goto out_unlock;
 +      }
  
        cpuset_cpus_allowed(p, cpus_allowed);
        cpumask_and(new_mask, in_mask, cpus_allowed);
  again:
        retval = set_cpus_allowed_ptr(p, new_mask);
 +      if (retval)
 +              printk(KERN_DEBUG "SCHED: set_cpus_allowed_ptr status %d\n", retval);   
  
        if (!retval) {
                cpuset_cpus_allowed(p, cpus_allowed);
@@@ -4633,8 -4262,6 +4633,8 @@@ out_free_cpus_allowed
  out_put_task:
        put_task_struct(p);
        put_online_cpus();
 +      if (retval)
 +              printk(KERN_DEBUG "SCHED: setaffinity status %d\n", retval);
        return retval;
  }
  
@@@ -4682,16 -4309,12 +4682,16 @@@ long sched_getaffinity(pid_t pid, struc
  
        retval = -ESRCH;
        p = find_process_by_pid(pid);
 -      if (!p)
 +      if (!p){
 +              printk(KERN_DEBUG "SCHED: getaffinity find process %d fail\n", pid);  
                goto out_unlock;
 +      }
  
        retval = security_task_getscheduler(p);
 -      if (retval)
 +      if (retval){
 +              printk(KERN_DEBUG "SCHED: getaffinity security_task_getscheduler fail, status: %d\n", retval); 
                goto out_unlock;
 +      }
  
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
@@@ -4701,9 -4324,6 +4701,9 @@@ out_unlock
        rcu_read_unlock();
        put_online_cpus();
  
 +      if (retval){
 +              printk(KERN_DEBUG "SCHED: getaffinity status %d\n", retval);   
 +      }
        return retval;
  }
  
@@@ -5065,25 -4685,7 +5065,25 @@@ out_unlock
  }
  
  static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 -
 +#ifdef CONFIG_MT_DEBUG_MUTEXES
 +void mt_mutex_state(struct task_struct *p)
 +{
 +    struct task_struct *locker;
 +    if(p->blocked_on){
 +        locker =  p->blocked_on->task_wait_on;
 +        if(find_task_by_vpid(locker->pid) !=  NULL){
 +            printk("Hint: wait on mutex, holder is [%d:%s:%ld]\n", locker->pid, locker->comm, locker->state);
 +            if(locker->state != TASK_RUNNING){
 +                printk("Mutex holder process[%d:%s] is not running now:\n", locker->pid, locker->comm);
 +                show_stack(locker, NULL);
 +                printk("----\n");
 +            }
 +        }else{
 +            printk("Hint: wait on mutex, but holder already released lock\n");
 +        }
 +    }
 +}
 +#endif
  void sched_show_task(struct task_struct *p)
  {
        unsigned long free = 0;
  
        print_worker_info(KERN_INFO, p);
        show_stack(p, NULL);
 +#ifdef CONFIG_MT_DEBUG_MUTEXES
 +    mt_mutex_state(p);
 +#endif
  }
  
  void show_state_filter(unsigned long state_filter)
        touch_all_softlockup_watchdogs();
  
  #ifdef CONFIG_SCHED_DEBUG
-       sysrq_sched_debug_show();
+       if (!state_filter)
+               sysrq_sched_debug_show();
  #endif
        rcu_read_unlock();
        /*
@@@ -5262,7 -4862,6 +5263,7 @@@ int set_cpus_allowed_ptr(struct task_st
  
        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
 +              printk(KERN_DEBUG "SCHED: intersects new_mask: %lu, cpu_active_mask: %lu\n", new_mask->bits[0], cpu_active_mask->bits[0]);
                goto out;
        }
  
@@@ -5411,8 -5010,6 +5412,8 @@@ static void migrate_tasks(unsigned int 
         * done here.
         */
        rq->stop = NULL;
 +      /* MTK patch: keep RT tasks migratable even while RT-throttled */
 +      unthrottle_offline_rt_rqs(rq);
  
        for ( ; ; ) {
                /*
@@@ -5668,6 -5265,7 +5669,6 @@@ migration_call(struct notifier_block *n
                raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 -
                        set_rq_online(rq);
                }
                raw_spin_unlock_irqrestore(&rq->lock, flags);
@@@ -6156,9 -5754,6 +6157,9 @@@ cpu_attach_domain(struct sched_domain *
        rcu_assign_pointer(rq->sd, sd);
        destroy_sched_domains(tmp, cpu);
  
 +#if defined (CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined (CONFIG_HMP_PACK_SMALL_TASK)
 +      update_packing_domain(cpu);
 +#endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK || CONFIG_HMP_PACK_SMALL_TASK */
        update_top_cache_domain(cpu);
  }
  
@@@ -6435,12 -6030,6 +6436,12 @@@ int __weak arch_sd_sibling_asym_packing
         return 0*SD_ASYM_PACKING;
  }
  
 +#if defined (CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined (CONFIG_HMP_PACK_SMALL_TASK)
 +int __weak arch_sd_share_power_line(void)
 +{
 +      return 0*SD_SHARE_POWERLINE;
 +}
 +#endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK  || CONFIG_HMP_PACK_SMALL_TASK */
  /*
   * Initializers for schedule domains
   * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@@ -7438,9 -7027,6 +7439,9 @@@ void __init sched_init(void
                rq->nr_running = 0;
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
 +#ifdef CONFIG_PROVE_LOCKING
 +              rq->cpu = i;
 +#endif
                init_cfs_rq(&rq->cfs);
                init_rt_rq(&rq->rt, rq);
  #ifdef CONFIG_FAIR_GROUP_SCHED
@@@ -7558,24 -7144,13 +7559,24 @@@ static inline int preempt_count_equals(
        return (nested == preempt_offset);
  }
  
 +static int __might_sleep_init_called;
 +int __init __might_sleep_init(void)
 +{
 +      __might_sleep_init_called = 1;
 +      return 0;
 +}
 +early_initcall(__might_sleep_init);
 +
  void __might_sleep(const char *file, int line, int preempt_offset)
  {
        static unsigned long prev_jiffy;        /* ratelimiting */
  
        rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
        if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
 -          system_state != SYSTEM_RUNNING || oops_in_progress)
 +          oops_in_progress)
 +              return;
 +      if (system_state != SYSTEM_RUNNING &&
 +          (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
                return;
        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                return;
@@@ -8181,23 -7756,6 +8182,23 @@@ static void cpu_cgroup_css_offline(stru
        sched_offline_group(tg);
  }
  
 +static int
 +cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 +{
 +      const struct cred *cred = current_cred(), *tcred;
 +      struct task_struct *task;
 +
 +      cgroup_taskset_for_each(task, cgrp, tset) {
 +              tcred = __task_cred(task);
 +
 +              if ((current != task) && !capable(CAP_SYS_NICE) &&
 +                  cred->euid != tcred->uid && cred->euid != tcred->suid)
 +                      return -EACCES;
 +      }
 +
 +      return 0;
 +}
 +
  static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                 struct cgroup_taskset *tset)
  {
        cgroup_taskset_for_each(task, cgrp, tset) {
  #ifdef CONFIG_RT_GROUP_SCHED
                if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
 -                      return -EINVAL;
 +                      return -ERTGROUP;
  #else
                /* We don't support RT-tasks being in separate groups */
                if (task->sched_class != &fair_sched_class)
@@@ -8564,7 -8122,6 +8565,7 @@@ struct cgroup_subsys cpu_cgroup_subsys 
        .css_offline    = cpu_cgroup_css_offline,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
 +      .allow_attach   = cpu_cgroup_allow_attach,
        .exit           = cpu_cgroup_exit,
        .subsys_id      = cpu_cgroup_subsys_id,
        .base_cftypes   = cpu_files,
@@@ -8578,22 -8135,3 +8579,22 @@@ void dump_cpu_task(int cpu
        pr_info("Task dump for CPU %d:\n", cpu);
        sched_show_task(cpu_curr(cpu));
  }
 +
 +unsigned long long mt_get_thread_cputime(pid_t pid)
 +{
 +    struct task_struct *p;
 +    p = pid ? find_task_by_vpid(pid) : current;
 +    return task_sched_runtime(p);
 +}
 +unsigned long long mt_get_cpu_idle(int cpu)
 +{
 +    unsigned long long *unused = 0;
 +    return get_cpu_idle_time_us(cpu, unused);
 +}
 +unsigned long long mt_sched_clock(void)
 +{
 +    return sched_clock();
 +}
 +EXPORT_SYMBOL(mt_get_thread_cputime);
 +EXPORT_SYMBOL(mt_get_cpu_idle);
 +EXPORT_SYMBOL(mt_sched_clock);
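
The three mt_* helpers wrap existing kernel facilities for module consumers: task_sched_runtime() for per-thread CPU time (nanoseconds), get_cpu_idle_time_us() for per-CPU idle time (microseconds), and sched_clock(). A hypothetical sampling snippet built on them:

        unsigned long long now  = mt_sched_clock();             /* ns */
        unsigned long long idle = mt_get_cpu_idle(0);           /* us */
        unsigned long long self = mt_get_thread_cputime(0);     /* 0 == current */

        pr_info("now=%lluns cpu0_idle=%lluus self=%lluns\n", now, idle, self);
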
diff --combined kernel/sysctl.c
index a04cdcf29e1f977231724dcd5d1f5b0f118b4094,430725da89d07524f8ab34e5717a1bedbb123e83..f9648251352d5177b16d632047da3b0cd4b1d8e8
@@@ -105,8 -105,6 +105,8 @@@ extern char core_pattern[]
  extern unsigned int core_pipe_limit;
  #endif
  extern int pid_max;
 +extern int extra_free_kbytes;
 +extern int min_free_order_shift;
  extern int pid_max_min, pid_max_max;
  extern int percpu_pagelist_fraction;
  extern int compat_log;
@@@ -1283,21 -1281,6 +1283,21 @@@ static struct ctl_table vm_table[] = 
                .proc_handler   = min_free_kbytes_sysctl_handler,
                .extra1         = &zero,
        },
 +      {
 +              .procname       = "extra_free_kbytes",
 +              .data           = &extra_free_kbytes,
 +              .maxlen         = sizeof(extra_free_kbytes),
 +              .mode           = 0644,
 +              .proc_handler   = min_free_kbytes_sysctl_handler,
 +              .extra1         = &zero,
 +      },
 +      {
 +              .procname       = "min_free_order_shift",
 +              .data           = &min_free_order_shift,
 +              .maxlen         = sizeof(min_free_order_shift),
 +              .mode           = 0644,
 +              .proc_handler   = &proc_dointvec
 +      },
        {
                .procname       = "percpu_pagelist_fraction",
                .data           = &percpu_pagelist_fraction,
@@@ -1728,11 -1711,10 +1728,11 @@@ static int _proc_do_string(void* data, 
                len = 0;
                p = buffer;
                while (len < *lenp) {
 -                      if (get_user(c, p++))
 +                      if (get_user(c, p))
                                return -EFAULT;
                        if (c == 0 || c == '\n')
                                break;
 +                      p++;
                        len++;
                }
                if (len >= maxlen)
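
The _proc_do_string() change fixes a subtle off-by-one in cursor handling: the old loop did `get_user(c, p++)`, advancing before the terminator test, so p ended up one byte past the '\0' or '\n' that stopped the scan. Incrementing only after the test keeps p pointing at the terminator and len equal to the bytes actually consumed. A userspace model of the fixed scan, for illustration (the helper name is hypothetical):

        /* count bytes up to maxlen, stopping AT (not past) NUL/newline */
        size_t scan_len(const char *p, size_t maxlen)
        {
                size_t len = 0;

                while (len < maxlen) {
                        char c = *p;    /* get_user() in the kernel */
                        if (c == 0 || c == '\n')
                                break;
                        p++;
                        len++;
                }
                return len;
        }
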
@@@ -2242,6 -2224,7 +2242,7 @@@ static int __do_proc_doulongvec_minmax(
                                break;
                        if (neg)
                                continue;
+                       val = convmul * val / convdiv;
                        if ((min && val < *min) || (max && val > *max))
                                continue;
                        *i = val;
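
The one-liner reorders unit conversion ahead of the range check in __do_proc_doulongvec_minmax(): the parsed value is scaled by convmul/convdiv first, so *min and *max, which are stored in the converted unit, are compared against the right quantity. A worked example for the proc_doulongvec_ms_jiffies_minmax case, where convmul = HZ and convdiv = 1000:

        /* HZ = 100: user writes "250" (milliseconds) */
        unsigned long val = 250;
        val = 100 /* convmul = HZ */ * val / 1000 /* convdiv */;
        /* val == 25 jiffies; the min/max bounds now see 25, not 250 */
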
diff --combined mm/filemap.c
index 9d9b705ac0e7ea5d8d3f186e4e01b07f5d88502a,72130787db0afe8ff7ff0c0cb51c72a4e3cf562e..036046223c4f4b8b952012ad68df9a8f3fcc0d8b
@@@ -1123,6 -1123,11 +1123,11 @@@ static void do_generic_file_read(struc
  
                cond_resched();
  find_page:
+               if (fatal_signal_pending(current)) {
+                       error = -EINTR;
+                       goto out;
+               }
                page = find_get_page(mapping, index);
                if (!page) {
                        page_cache_sync_readahead(mapping,
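
The new check makes a buffered read bail out with -EINTR at the top of every page iteration once a fatal signal is queued, instead of grinding through the whole requested range for a task that is already being killed (e.g. by the OOM killer). The shape of the pattern, in sketch form:

        /* long per-page loop: abort promptly once the task is dying */
        for (;;) {
                if (fatal_signal_pending(current))
                        return -EINTR;  /* never reaches userspace: the
                                         * task is being killed anyway */
                /* ... find, read and copy one page ... */
        }
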
@@@ -1634,10 -1639,6 +1639,10 @@@ int filemap_fault(struct vm_area_struc
        } else if (!page) {
                /* No page in the page cache at all */
                do_sync_mmap_readahead(vma, ra, file, offset);
 +#ifdef CONFIG_ZRAM
 +        current->fm_flt++;
 +#endif
 +              count_vm_event(PGFMFAULT);
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
diff --combined mm/memory.c
index 2467a766656565fddcf42e4234dc60cb3cd643f6,8b4975d1f1672e39b681e5cd07ca8612c98f1ba8..9a90856bb0c08cbf01c84358df75b4c90f3b428e
  
  #include "internal.h"
  
 +#ifdef CONFIG_MTK_EXTMEM
 +extern bool extmem_in_mspace(struct vm_area_struct *vma);
 +extern unsigned long get_virt_from_mspace(unsigned long pa);
 +#endif
 +
  #ifdef LAST_NID_NOT_IN_PAGE_FLAGS
  #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
  #endif
@@@ -1658,14 -1653,7 +1658,8 @@@ no_page_table
                return ERR_PTR(-EFAULT);
        return page;
  }
 +EXPORT_SYMBOL_GPL(follow_page_mask);
  
- static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
- {
-       return stack_guard_page_start(vma, addr) ||
-              stack_guard_page_end(vma, addr+PAGE_SIZE);
- }
  /**
   * __get_user_pages() - pin user pages in memory
   * @tsk:      task_struct of target task
@@@ -1803,24 -1791,11 +1797,24 @@@ long __get_user_pages(struct task_struc
                        page_mask = 0;
                        goto next_page;
                }
 -
 +    #ifdef CONFIG_MTK_EXTMEM
 +              if (!vma || !(vm_flags & vma->vm_flags))
 +                      return i ? : -EFAULT;
 +
 +              if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
 +                      /*
 +                       * Allow only the VM_IO | VM_RESERVED | VM_PFNMAP
 +                       * combination (reserved physical-memory PFN
 +                       * mapping); reject anything else.
 +                       */
 +                      if (!((vma->vm_flags & VM_IO) && (vma->vm_flags & VM_RESERVED) && (vma->vm_flags & VM_PFNMAP)))
 +                              return i ? : -EFAULT;
 +              }
 +    #else
                if (!vma ||
                    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
                        return i ? : -EFAULT;
 +    #endif
  
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
                                int ret;
                                unsigned int fault_flags = 0;
  
-                               /* For mlock, just skip the stack guard page. */
-                               if (foll_flags & FOLL_MLOCK) {
-                                       if (stack_guard_page(vma, start))
-                                               goto next_page;
-                               }
                                if (foll_flags & FOLL_WRITE)
                                        fault_flags |= FAULT_FLAG_WRITE;
                                if (nonblocking)
@@@ -2397,18 -2367,12 +2386,18 @@@ int remap_pfn_range(struct vm_area_stru
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         * See vm_normal_page() for details.
         */
 +#ifdef CONFIG_MTK_EXTMEM
 +      if (addr == vma->vm_start && end == vma->vm_end) {
 +              vma->vm_pgoff = pfn;
 +      } else if (is_cow_mapping(vma->vm_flags))
 +              return -EINVAL;
 +#else
        if (is_cow_mapping(vma->vm_flags)) {
                if (addr != vma->vm_start || end != vma->vm_end)
                        return -EINVAL;
                vma->vm_pgoff = pfn;
        }
 -
 +#endif
        err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
        if (err)
                return -EINVAL;
@@@ -3216,40 -3180,6 +3205,6 @@@ out_release
        return ret;
  }
  
- /*
-  * This is like a special single-page "expand_{down|up}wards()",
-  * except we must first make sure that 'address{-|+}PAGE_SIZE'
-  * doesn't hit another vma.
-  */
- static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
- {
-       address &= PAGE_MASK;
-       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-               struct vm_area_struct *prev = vma->vm_prev;
-               /*
-                * Is there a mapping abutting this one below?
-                *
-                * That's only ok if it's the same stack mapping
-                * that has gotten split..
-                */
-               if (prev && prev->vm_end == address)
-                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-               return expand_downwards(vma, address - PAGE_SIZE);
-       }
-       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-               struct vm_area_struct *next = vma->vm_next;
-               /* As VM_GROWSDOWN but s/below/above/ */
-               if (next && next->vm_start == address + PAGE_SIZE)
-                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-               return expand_upwards(vma, address + PAGE_SIZE);
-       }
-       return 0;
- }
  /*
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), and pte mapped but not yet locked.
@@@ -3269,10 -3199,6 +3224,6 @@@ static int do_anonymous_page(struct mm_
        if (vma->vm_flags & VM_SHARED)
                return VM_FAULT_SIGBUS;
  
-       /* Check if we need to add a guard page to the stack */
-       if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGSEGV;
        /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
@@@ -3533,10 -3459,6 +3484,10 @@@ static int do_linear_fault(struct mm_st
        pgoff_t pgoff = (((address & PAGE_MASK)
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  
 +      /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
 +      if (!vma->vm_ops->fault)
 +              return VM_FAULT_SIGBUS;
 +
        pte_unmap(page_table);
        /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
        if (!vma->vm_ops->fault)
@@@ -4175,21 -4097,6 +4126,21 @@@ static int __access_remote_vm(struct ta
                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
                if (ret <= 0) {
 +#ifdef CONFIG_MTK_EXTMEM
 +                      if (!write) {
 +                              vma = find_vma(mm, addr);
 +                              if (!vma || vma->vm_start > addr)
 +                                      break;
 +                              if (vma->vm_end < addr + len)
 +                                      len = vma->vm_end - addr;
 +                              if (extmem_in_mspace(vma)) {
 +                                      void *extmem_va = (void *)get_virt_from_mspace(vma->vm_pgoff << PAGE_SHIFT) + (addr - vma->vm_start);
 +                                      memcpy(buf, extmem_va, len);
 +                                      buf += len;
 +                                      break;
 +                              }
 +                      }
 +#endif
                        /*
                         * Check if this is a VM_IO | VM_PFNMAP VMA, which
                         * we can access using slightly different code.
diff --combined mm/mempolicy.c
index 420e4b97ffab5853d6e543080c612b341a171ce3,e57c967a1af0e751ab47f71b7fb24ce154e4f1fe..984419bbe565c1b4f9d4df41bfa5faeb8f0a93ba
@@@ -725,7 -725,7 +725,7 @@@ static int mbind_range(struct mm_struc
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                  vma->anon_vma, vma->vm_file, pgoff,
 -                                new_pol);
 +                                new_pol, vma_get_anon_name(vma));
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
@@@ -1537,7 -1537,6 +1537,6 @@@ asmlinkage long compat_sys_get_mempolic
  asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
                                     compat_ulong_t maxnode)
  {
-       long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
  
        if (nmask) {
-               err = compat_get_bitmap(bm, nmask, nr_bits);
+               if (compat_get_bitmap(bm, nmask, nr_bits))
+                       return -EFAULT;
                nm = compat_alloc_user_space(alloc_size);
-               err |= copy_to_user(nm, bm, alloc_size);
+               if (copy_to_user(nm, bm, alloc_size))
+                       return -EFAULT;
        }
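+               /* Failing fast on either copy above keeps a partially-copied
+                * nodemask in the compat_alloc_user_space() area from ever
+                * reaching sys_set_mempolicy(). */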
  
-       if (err)
-               return -EFAULT;
        return sys_set_mempolicy(mode, nm, nr_bits+1);
  }
  
@@@ -1561,7 -1559,6 +1559,6 @@@ asmlinkage long compat_sys_mbind(compat
                             compat_ulong_t mode, compat_ulong_t __user *nmask,
                             compat_ulong_t maxnode, compat_ulong_t flags)
  {
-       long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        nodemask_t bm;
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
  
        if (nmask) {
-               err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+               if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+                       return -EFAULT;
                nm = compat_alloc_user_space(alloc_size);
-               err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+               if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+                       return -EFAULT;
        }
  
-       if (err)
-               return -EFAULT;
        return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
  }
  
diff --combined mm/mmap.c
index f172a2723460d50a0ec8129f9b0b18a5987edfe6,3c4e4d7ae54e1c6f34890e43a6882b869dee68af..740b4ff2fd92064748e1cd4f6681ee73365f1f25
+++ b/mm/mmap.c
@@@ -263,6 -263,7 +263,7 @@@ SYSCALL_DEFINE1(brk, unsigned long, brk
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
+       struct vm_area_struct *next;
        unsigned long min_brk;
        bool populate;
  
        }
  
        /* Check against existing mmap mappings. */
-       if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+       next = find_vma(mm, oldbrk);
+       if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
                goto out;
  
        /* Ok, looks good - let it rip. */
@@@ -331,10 -333,22 +333,22 @@@ out
  
  static long vma_compute_subtree_gap(struct vm_area_struct *vma)
  {
-       unsigned long max, subtree_gap;
-       max = vma->vm_start;
-       if (vma->vm_prev)
-               max -= vma->vm_prev->vm_end;
+       unsigned long max, prev_end, subtree_gap;
+       /*
+        * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+        * allow two stack_guard_gaps between them here, and when choosing
+        * an unmapped area; whereas when expanding we only require one.
+        * That's a little inconsistent, but keeps the code here simpler.
+        */
+       max = vm_start_gap(vma);
+       if (vma->vm_prev) {
+               prev_end = vm_end_gap(vma->vm_prev);
+               if (max > prev_end)
+                       max -= prev_end;
+               else
+                       max = 0;
+       }
        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@@ -418,7 -432,7 +432,7 @@@ void validate_mm(struct mm_struct *mm
                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                        anon_vma_interval_tree_verify(avc);
                vma_unlock_anon_vma(vma);
-               highest_address = vma->vm_end;
+               highest_address = vm_end_gap(vma);
                vma = vma->vm_next;
                i++;
        }
@@@ -586,7 -600,7 +600,7 @@@ void __vma_link_rb(struct mm_struct *mm
        if (vma->vm_next)
                vma_gap_update(vma->vm_next);
        else
-               mm->highest_vm_end = vma->vm_end;
+               mm->highest_vm_end = vm_end_gap(vma);
  
        /*
         * vma->vm_prev wasn't known when we followed the rbtree to find the
@@@ -835,7 -849,7 +849,7 @@@ again:                     remove_next = 1 + (end > next-
                        vma_gap_update(vma);
                if (end_changed) {
                        if (!next)
-                               mm->highest_vm_end = end;
+                               mm->highest_vm_end = vm_end_gap(vma);
                        else if (!adjust_next)
                                vma_gap_update(next);
                }
                else if (next)
                        vma_gap_update(next);
                else
-                       mm->highest_vm_end = end;
+                       WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
        }
        if (insert && file)
                uprobe_mmap(insert);
   * per-vma resources, so we don't attempt to merge those.
   */
  static inline int is_mergeable_vma(struct vm_area_struct *vma,
 -                      struct file *file, unsigned long vm_flags)
 +                      struct file *file, unsigned long vm_flags,
 +                      const char __user *anon_name)
  {
        if (vma->vm_flags ^ vm_flags)
                return 0;
                return 0;
        if (vma->vm_ops && vma->vm_ops->close)
                return 0;
 +      if (vma_get_anon_name(vma) != anon_name)
 +              return 0;
        return 1;
  }
  
@@@ -934,10 -945,9 +948,10 @@@ static inline int is_mergeable_anon_vma
   */
  static int
  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 -      struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 +      struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
 +      const char __user *anon_name)
  {
 -      if (is_mergeable_vma(vma, file, vm_flags) &&
 +      if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
   */
  static int
  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 -      struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 +      struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
 +      const char __user *anon_name)
  {
 -      if (is_mergeable_vma(vma, file, vm_flags) &&
 +      if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
  }
  
  /*
 - * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 - * whether that can be merged with its predecessor or its successor.
 - * Or both (it neatly fills a hole).
 + * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 + * figure out whether that can be merged with its predecessor or its
 + * successor.  Or both (it neatly fills a hole).
   *
   * In most cases - when called for mmap, brk or mremap - [addr,end) is
   * certain not to be mapped by the time vma_merge is called; but when
@@@ -1000,8 -1009,7 +1014,8 @@@ struct vm_area_struct *vma_merge(struc
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
 -                      pgoff_t pgoff, struct mempolicy *policy)
 +                      pgoff_t pgoff, struct mempolicy *policy,
 +                      const char __user *anon_name)
  {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
         */
        if (prev && prev->vm_end == addr &&
                        mpol_equal(vma_policy(prev), policy) &&
 -                      can_vma_merge_after(prev, vm_flags,
 -                                              anon_vma, file, pgoff)) {
 +                      can_vma_merge_after(prev, vm_flags, anon_vma,
 +                                              file, pgoff, anon_name)) {
                /*
                 * OK, it can.  Can we now merge in the successor as well?
                 */
                if (next && end == next->vm_start &&
                                mpol_equal(policy, vma_policy(next)) &&
 -                              can_vma_merge_before(next, vm_flags,
 -                                      anon_vma, file, pgoff+pglen) &&
 +                              can_vma_merge_before(next, vm_flags, anon_vma,
 +                                              file, pgoff+pglen, anon_name) &&
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
         */
        if (next && end == next->vm_start &&
                        mpol_equal(policy, vma_policy(next)) &&
 -                      can_vma_merge_before(next, vm_flags,
 -                                      anon_vma, file, pgoff+pglen)) {
 +                      can_vma_merge_before(next, vm_flags, anon_vma,
 +                                      file, pgoff+pglen, anon_name)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
                        err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
@@@ -1525,8 -1533,7 +1539,8 @@@ munmap_back
        /*
         * Can we just expand an old mapping?
         */
 -      vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
 +      vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
 +                      NULL, NULL);
        if (vma)
                goto out;
  
@@@ -1677,7 -1684,7 +1691,7 @@@ unsigned long unmapped_area(struct vm_u
  
        while (true) {
                /* Visit left subtree if it looks promising */
-               gap_end = vma->vm_start;
+               gap_end = vm_start_gap(vma);
                if (gap_end >= low_limit && vma->vm_rb.rb_left) {
                        struct vm_area_struct *left =
                                rb_entry(vma->vm_rb.rb_left,
                        }
                }
  
-               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
  check_current:
                /* Check if current node has a suitable gap */
                if (gap_start > high_limit)
                        return -ENOMEM;
-               if (gap_end >= low_limit && gap_end - gap_start >= length)
+               if (gap_end >= low_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
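+               /* vm_start_gap()/vm_end_gap() pad stack VMAs by
+                * stack_guard_gap, so gap_end can legitimately fall below
+                * gap_start here; the gap_end > gap_start test keeps the
+                * unsigned subtraction from wrapping around. */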
  
                /* Visit right subtree if it looks promising */
                        vma = rb_entry(rb_parent(prev),
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_left) {
-                               gap_start = vma->vm_prev->vm_end;
-                               gap_end = vma->vm_start;
+                               gap_start = vm_end_gap(vma->vm_prev);
+                               gap_end = vm_start_gap(vma);
                                goto check_current;
                        }
                }
@@@ -1780,7 -1788,7 +1795,7 @@@ unsigned long unmapped_area_topdown(str
  
        while (true) {
                /* Visit right subtree if it looks promising */
-               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
                if (gap_start <= high_limit && vma->vm_rb.rb_right) {
                        struct vm_area_struct *right =
                                rb_entry(vma->vm_rb.rb_right,
  
  check_current:
                /* Check if current node has a suitable gap */
-               gap_end = vma->vm_start;
+               gap_end = vm_start_gap(vma);
                if (gap_end < low_limit)
                        return -ENOMEM;
-               if (gap_start <= high_limit && gap_end - gap_start >= length)
+               if (gap_start <= high_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
  
                /* Visit left subtree if it looks promising */
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_right) {
                                gap_start = vma->vm_prev ?
-                                       vma->vm_prev->vm_end : 0;
+                                       vm_end_gap(vma->vm_prev) : 0;
                                goto check_current;
                        }
                }
@@@ -1857,7 -1866,7 +1873,7 @@@ arch_get_unmapped_area(struct file *fil
                unsigned long len, unsigned long pgoff, unsigned long flags)
  {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct vm_unmapped_area_info info;
  
        if (len > TASK_SIZE - mmap_min_addr)
  
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
+               vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)) &&
+                   (!prev || addr >= vm_end_gap(prev)))
                        return addr;
        }
  
@@@ -1902,7 -1912,7 +1919,7 @@@ arch_get_unmapped_area_topdown(struct f
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
  {
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
+               vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)) &&
+                               (!prev || addr >= vm_end_gap(prev)))
                        return addr;
        }
  
@@@ -2059,21 -2070,19 +2077,19 @@@ find_vma_prev(struct mm_struct *mm, uns
   * update accounting. This is shared with both the
   * grow-up and grow-down cases.
   */
- static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+ static int acct_stack_growth(struct vm_area_struct *vma,
+                            unsigned long size, unsigned long grow)
  {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start, actual_size;
+       unsigned long new_start;
  
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
  
        /* Stack limit test */
-       actual_size = size;
-       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-               actual_size -= PAGE_SIZE;
-       if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
  
        /* mlock limit tests */
   */
  int expand_upwards(struct vm_area_struct *vma, unsigned long address)
  {
-       int error;
+       struct vm_area_struct *next;
+       unsigned long gap_addr;
+       int error = 0;
  
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
  
-       /*
-        * We must make sure the anon_vma is allocated
-        * so that the anon_vma locking is not a noop.
-        */
+       /* Guard against exceeding limits of the address space. */
+       address &= PAGE_MASK;
+       if (address >= TASK_SIZE)
+               return -ENOMEM;
+       address += PAGE_SIZE;
+       /* Enforce stack_guard_gap */
+       gap_addr = address + stack_guard_gap;
+       /* Guard against overflow */
+       if (gap_addr < address || gap_addr > TASK_SIZE)
+               gap_addr = TASK_SIZE;
+       next = vma->vm_next;
+       if (next && next->vm_start < gap_addr) {
+               if (!(next->vm_flags & VM_GROWSUP))
+                       return -ENOMEM;
+               /* Check that both stack segments have the same anon_vma? */
+       }
+       /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
-       vma_lock_anon_vma(vma);
  
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
-        * Also guard against wrapping around to address 0.
         */
-       if (address < PAGE_ALIGN(address+4))
-               address = PAGE_ALIGN(address+4);
-       else {
-               vma_unlock_anon_vma(vma);
-               return -ENOMEM;
-       }
-       error = 0;
+       vma_lock_anon_vma(vma);
  
        /* Somebody else might have raced and expanded it already */
        if (address > vma->vm_end) {
                                if (vma->vm_next)
                                        vma_gap_update(vma->vm_next);
                                else
-                                       vma->vm_mm->highest_vm_end = address;
+                                       vma->vm_mm->highest_vm_end = vm_end_gap(vma);
                                spin_unlock(&vma->vm_mm->page_table_lock);
  
                                perf_event_mmap(vma);
  int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
  {
+       struct vm_area_struct *prev;
+       unsigned long gap_addr;
        int error;
  
-       /*
-        * We must make sure the anon_vma is allocated
-        * so that the anon_vma locking is not a noop.
-        */
-       if (unlikely(anon_vma_prepare(vma)))
-               return -ENOMEM;
        address &= PAGE_MASK;
        error = security_mmap_addr(address);
        if (error)
                return error;
  
-       vma_lock_anon_vma(vma);
+       /* Enforce stack_guard_gap */
+       gap_addr = address - stack_guard_gap;
+       if (gap_addr > address)
+               return -ENOMEM;
+       prev = vma->vm_prev;
+       if (prev && prev->vm_end > gap_addr) {
+               if (!(prev->vm_flags & VM_GROWSDOWN))
+                       return -ENOMEM;
+               /* Check that both stack segments have the same anon_vma? */
+       }
+       /* We must make sure the anon_vma is allocated. */
+       if (unlikely(anon_vma_prepare(vma)))
+               return -ENOMEM;
  
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
         */
+       vma_lock_anon_vma(vma);
  
        /* Somebody else might have raced and expanded it already */
        if (address < vma->vm_start) {
        return error;
  }
  
- /*
-  * Note how expand_stack() refuses to expand the stack all the way to
-  * abut the next virtual mapping, *unless* that mapping itself is also
-  * a stack mapping. We want to leave room for a guard page, after all
-  * (the guard page itself is not added here, that is done by the
-  * actual page faulting logic)
-  *
-  * This matches the behavior of the guard page logic (see mm/memory.c:
-  * check_stack_guard_page()), which only allows the guard page to be
-  * removed under these circumstances.
-  */
+ /* enforced gap between the expanding stack and other mappings. */
+ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+ static int __init cmdline_parse_stack_guard_gap(char *p)
+ {
+       unsigned long val;
+       char *endptr;
+       val = simple_strtoul(p, &endptr, 10);
+       if (!*endptr)
+               stack_guard_gap = val << PAGE_SHIFT;
+       return 0;
+ }
+ __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
  #ifdef CONFIG_STACK_GROWSUP
  int expand_stack(struct vm_area_struct *vma, unsigned long address)
  {
-       struct vm_area_struct *next;
-       address &= PAGE_MASK;
-       next = vma->vm_next;
-       if (next && next->vm_start == address + PAGE_SIZE) {
-               if (!(next->vm_flags & VM_GROWSUP))
-                       return -ENOMEM;
-       }
        return expand_upwards(vma, address);
  }
  
@@@ -2295,14 -2321,6 +2328,6 @@@ find_extend_vma(struct mm_struct *mm, u
  #else
  int expand_stack(struct vm_area_struct *vma, unsigned long address)
  {
-       struct vm_area_struct *prev;
-       address &= PAGE_MASK;
-       prev = vma->vm_prev;
-       if (prev && prev->vm_end == address) {
-               if (!(prev->vm_flags & VM_GROWSDOWN))
-                       return -ENOMEM;
-       }
        return expand_downwards(vma, address);
  }
  
@@@ -2399,7 -2417,7 +2424,7 @@@ detach_vmas_to_be_unmapped(struct mm_st
                vma->vm_prev = prev;
                vma_gap_update(vma);
        } else
-               mm->highest_vm_end = prev ? prev->vm_end : 0;
+               mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
@@@ -2498,17 -2516,9 +2523,17 @@@ int split_vma(struct mm_struct *mm, str
   * work.  This now handles partial unmappings.
   * Jeremy Fitzhardinge <jeremy@goop.org>
   */
 +#ifdef CONFIG_MTK_EXTMEM
 +extern bool extmem_in_mspace(struct vm_area_struct *vma);
 +extern void *get_virt_from_mspace(void *pa);
 +extern size_t extmem_get_mem_size(unsigned long pgoff);
 +extern void extmem_free(void *mem);
 +#endif
 +
  int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
  {
        unsigned long end;
 +      struct file *file;
        struct vm_area_struct *vma, *prev, *last;
  
        if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
        vma = find_vma(mm, start);
        if (!vma)
                return 0;
 +      file = vma->vm_file;
 +      if (file) {
 +              const char *name = file->f_path.dentry->d_iname;
 +
 +              if (name && (strstr(name, "app_process") || strstr(name, "app_process64") ||
 +                           strstr(name, "main") || strstr(name, "Binder_")))
 +                      printk("name:%s unmap vm_start %lx  end: %lx\n",
 +                             name, vma->vm_start, vma->vm_end);
 +      } else {
 +              const char *name = arch_vma_name(vma);
 +
 +              if (name && (strstr(name, "app_process") || strstr(name, "app_process64") ||
 +                           strstr(name, "main") || strstr(name, "Binder_")))
 +                      printk("name:%s unmap vm_start %lx  end: %lx\n",
 +                             name, vma->vm_start, vma->vm_end);
 +      }
        prev = vma->vm_prev;
        /* we have  start < vma->vm_end  */
  
 +#ifdef CONFIG_MTK_EXTMEM
 +      /* Get the correct mapping size when the VMA lives in mspace. */
 +      if (extmem_in_mspace(vma))
 +              len = extmem_get_mem_size(vma->vm_pgoff);
 +#endif
 +
        /* if it doesn't overlap, we have nothing.. */
        end = start + len;
        if (vma->vm_start >= end)
@@@ -2700,7 -2691,7 +2725,7 @@@ static unsigned long do_brk(unsigned lo
  
        /* Can we just expand an old private anonymous mapping? */
        vma = vma_merge(mm, prev, addr, addr + len, flags,
 -                                      NULL, NULL, pgoff, NULL);
 +                                      NULL, NULL, pgoff, NULL, NULL);
        if (vma)
                goto out;
  
@@@ -2858,8 -2849,7 +2883,8 @@@ struct vm_area_struct *copy_vma(struct 
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
                return NULL;    /* should never get here */
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
 -                      vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
 +                      vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
 +                      vma_get_anon_name(vma));
        if (new_vma) {
                /*
                 * Source vma may have been merged into new_vma
diff --combined net/ipv4/fib_frontend.c
index 4fc63bd9a5b223d0a43483d9775a82226cf8a051,017b4792cd447e8937c7ccb2f07791c12b22a08f..d60e1783d522271f00fa1446364cdce84a2a6a3f
@@@ -413,9 -413,6 +413,9 @@@ static int rtentry_to_fib_config(struc
                colon = strchr(devname, ':');
                if (colon)
                        *colon = 0;
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk(KERN_INFO "[mtk_net][RTlog info] ip_rt_ioctl rt.dev=%s\n", devname);
 +#endif
                dev = __dev_get_by_name(net, devname);
                if (!dev)
                        return -ENODEV;
@@@ -494,9 -491,6 +494,9 @@@ int ip_rt_ioctl(struct net *net, unsign
  
                if (copy_from_user(&rt, arg, sizeof(rt)))
                        return -EFAULT;
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk(KERN_INFO "[mtk_net][RTlog info] ip_rt_ioctl rt.flags=%08x rt.rt_dst=%08x rt.rt_gateway=%08x\n",
 +                     rt.rt_flags, sk_extract_addr(&(rt.rt_dst)), sk_extract_addr(&(rt.rt_gateway)));
 +#endif
  
                rtnl_lock();
                err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
                        struct fib_table *tb;
  
                        if (cmd == SIOCDELRT) {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                              printk(KERN_INFO "[mtk_net][RTlog delete] ip_rt_ioctl: cmd == SIOCDELRT\n");
 +#endif
                                tb = fib_get_table(net, cfg.fc_table);
                                if (tb)
                                        err = fib_table_delete(tb, &cfg);
                                else
                                        err = -ESRCH;
                        } else {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                              printk(KERN_INFO "[mtk_net][RTlog insert] ip_rt_ioctl: cmd == SIOCADDRT\n");
 +#endif
                                tb = fib_new_table(net, cfg.fc_table);
                                if (tb)
                                        err = fib_table_insert(tb, &cfg);
@@@ -543,7 -531,6 +543,7 @@@ const struct nla_policy rtm_ipv4_policy
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
        [RTA_FLOW]              = { .type = NLA_U32 },
 +      [RTA_UID]               = { .type = NLA_U32 },
  };
  
  static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
@@@ -623,15 -610,11 +623,15 @@@ static int inet_rtm_delroute(struct sk_
        struct fib_config cfg;
        struct fib_table *tb;
        int err;
 -
 +#ifdef CONFIG_MTK_NET_LOGGING
 +      printk(KERN_INFO "[mtk_net][RTlog delete] inet_rtm_delroute\n");
 +#endif
        err = rtm_to_fib_config(net, skb, nlh, &cfg);
        if (err < 0)
                goto errout;
 -
 +#ifdef CONFIG_MTK_NET_LOGGING
 +      printk(KERN_INFO "[mtk_net][RTlog info] inet_rtm_delroute cfg.fc_dst=%08x cfg.fc_gw=%08x\n", cfg.fc_dst, cfg.fc_gw);
 +#endif
        tb = fib_get_table(net, cfg.fc_table);
        if (tb == NULL) {
                err = -ESRCH;
@@@ -649,16 -632,10 +649,16 @@@ static int inet_rtm_newroute(struct sk_
        struct fib_config cfg;
        struct fib_table *tb;
        int err;
 +#ifdef CONFIG_MTK_NET_LOGGING
 +      printk(KERN_INFO "[mtk_net][RTlog insert] inet_rtm_newroute\n");
 +#endif
  
        err = rtm_to_fib_config(net, skb, nlh, &cfg);
        if (err < 0)
                goto errout;
 +#ifdef CONFIG_MTK_NET_LOGGING
 +      printk(KERN_INFO "[mtk_net][RTlog info] inet_rtm_newroute cfg.fc_dst=%08x cfg.fc_gw=%08x\n", cfg.fc_dst, cfg.fc_gw);
 +#endif
  
        tb = fib_new_table(net, cfg.fc_table);
        if (tb == NULL) {
@@@ -782,16 -759,10 +782,16 @@@ void fib_add_ifaddr(struct in_ifaddr *i
  
        if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
            (prefix != addr || ifa->ifa_prefixlen < 32)) {
 +              /* MTK_NET_CHANGES: skip the subnet route on ccmni interfaces. */
 +              if (strncmp(dev->name, "ccmni", 5) == 0) {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                      printk(KERN_INFO "[mtk_net][RTlog] ignore ccmni subnet route\n");
 +#endif
 +              } else {
                fib_magic(RTM_NEWROUTE,
                          dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
                          prefix, ifa->ifa_prefixlen, prim);
 -
 +              }
                /* Add network specific broadcasts, when it takes a sense */
                if (ifa->ifa_prefixlen < 31) {
                        fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
@@@ -986,7 -957,8 +986,8 @@@ static void nl_fib_input(struct sk_buf
  
        net = sock_net(skb->sk);
        nlh = nlmsg_hdr(skb);
-       if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+       if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+           skb->len < nlh->nlmsg_len ||
            nlmsg_len(nlh) < sizeof(*frn))
                return;
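+       /* The tightened length check above ensures the buffer really holds a
+        * struct fib_result_nl before frn is dereferenced; the old
+        * NLMSG_HDRLEN test only guaranteed room for the netlink header.
+        */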
  
@@@ -1042,9 -1014,6 +1043,9 @@@ static int fib_inetaddr_event(struct no
  
        switch (event) {
        case NETDEV_UP:
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk(KERN_INFO "[mtk_net][RTlog insert] fib_inetaddr_event() %s NETDEV_UP\n", ifa->ifa_dev->dev->name);
 +#endif
                fib_add_ifaddr(ifa);
  #ifdef CONFIG_IP_ROUTE_MULTIPATH
                fib_sync_up(dev);
                rt_cache_flush(dev_net(dev));
                break;
        case NETDEV_DOWN:
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk(KERN_INFO "[mtk_net][RTlog delete] fib_inetaddr_event() %s NETDEV_DOWN\n", ifa->ifa_dev->dev->name);
 +#endif
                fib_del_ifaddr(ifa, NULL);
                atomic_inc(&net->ipv4.dev_addr_genid);
                if (ifa->ifa_dev->ifa_list == NULL) {
@@@ -1089,9 -1055,6 +1090,9 @@@ static int fib_netdev_event(struct noti
  
        switch (event) {
        case NETDEV_UP:
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk(KERN_INFO "[mtk_net][RTlog insert] fib_netdev_event() %s NETDEV_UP\n", dev->name);
 +#endif
                for_ifa(in_dev) {
                        fib_add_ifaddr(ifa);
                } endfor_ifa(in_dev);
                rt_cache_flush(net);
                break;
        case NETDEV_DOWN:
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk(KERN_INFO "[mtk_net][RTlog delete] fib_netdev_event() %s NETDEV_DOWN\n", dev->name);
 +#endif
                fib_disable_ip(dev, 0);
                break;
        case NETDEV_CHANGEMTU:
diff --combined net/ipv4/tcp_input.c
index 1e0c33efccbad0cb638fc16a2e210f63be21be5b,0680058fe6937c43b42e37784549c1f72d37c91a..094515e751d01c323f6600356cf072d0a723d457
@@@ -99,7 -99,6 +99,7 @@@ int sysctl_tcp_thin_dupack __read_mostl
  
  int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
  int sysctl_tcp_early_retrans __read_mostly = 3;
 +int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
  
  #define FLAG_DATA             0x01 /* Incoming frame contained data.          */
  #define FLAG_WIN_UPDATE               0x02 /* Incoming ACK was a window update.       */
@@@ -353,14 -352,14 +353,14 @@@ static void tcp_grow_window(struct soc
  static void tcp_fixup_rcvbuf(struct sock *sk)
  {
        u32 mss = tcp_sk(sk)->advmss;
 -      u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
 +      u32 icwnd = sysctl_tcp_default_init_rwnd;
        int rcvmem;
  
        /* Limit to 10 segments if mss <= 1460,
         * or 14600/mss segments, with a minimum of two segments.
         */
        if (mss > 1460)
 -              icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
 +              icwnd = max_t(u32, (1460 * icwnd) / mss, 2);
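 +      /* Worked example: with the default of 10 segments and mss = 2920,
 +       * icwnd = max(1460 * 10 / 2920, 2) = 5 segments. */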
  
        rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
        while (tcp_win_from_space(rcvmem) < mss)
@@@ -1870,7 -1869,6 +1870,7 @@@ void tcp_clear_retrans(struct tcp_sock 
  void tcp_enter_loss(struct sock *sk, int how)
  {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 +      struct inet_connection_sock *icsk1 = inet_csk(sk); /* writable alias, used to clear icsk_MMSRB */
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        bool new_recovery = false;
                tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tcp_ca_event(sk, CA_EVENT_LOSS);
        }
 +      if (icsk->icsk_MMSRB == 1) {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk("[mtk_net][mmspb] tcp_enter_loss snd_cwnd=%u, snd_cwnd_cnt=%u\n",
 +                     tp->snd_cwnd, tp->snd_cwnd_cnt);
 +#endif
 +              if (tp->mss_cache != 0)
 +                      tp->snd_cwnd = tp->rcv_wnd / tp->mss_cache;
 +              else
 +                      tp->snd_cwnd = tp->rcv_wnd / tp->advmss;
 +
 +              if (tp->snd_ssthresh > 16)
 +                      tp->snd_cwnd = tp->snd_ssthresh / 2; /* half the default ssthresh */
 +              else
 +                      tp->snd_cwnd = tp->snd_ssthresh / 2 + 4;
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk("[mtk_net][mmspb] tcp_enter_loss update snd_cwnd=%u\n", tp->snd_cwnd);
 +#endif
 +              icsk1->icsk_MMSRB = 0;
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk("[mtk_net][mmspb] tcp_enter_loss set icsk_MMSRB=0\n");
 +#endif
 +      } else {
 	tp->snd_cwnd       = 1;
 +      }
        tp->snd_cwnd_cnt   = 0;
        tp->snd_cwnd_stamp = tcp_time_stamp;
  
@@@ -1980,7 -1945,7 +1980,7 @@@ static bool tcp_check_sack_reneging(str
                icsk->icsk_retransmits++;
                tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                                        icsk->icsk_rto, TCP_RTO_MAX);
 +                                        icsk->icsk_rto, sysctl_tcp_rto_max);
                return true;
        }
        return false;
@@@ -2029,7 -1994,7 +2029,7 @@@ static bool tcp_pause_early_retransmit(
                return false;
  
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
 -                                TCP_RTO_MAX);
 +                                sysctl_tcp_rto_max);
        return true;
  }
  
@@@ -3017,7 -2982,7 +3017,7 @@@ void tcp_rearm_rto(struct sock *sk
                                rto = delta;
                }
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
 -                                        TCP_RTO_MAX);
 +                                        sysctl_tcp_rto_max);
        }
  }
  
@@@ -3247,8 -3212,8 +3247,8 @@@ static void tcp_ack_probe(struct sock *
                 */
        } else {
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 -                                        min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
 -                                        TCP_RTO_MAX);
 +                                        min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
 +                                        sysctl_tcp_rto_max);
        }
  }
  
@@@ -5370,6 -5335,7 +5370,7 @@@ void tcp_finish_connect(struct sock *sk
        struct inet_connection_sock *icsk = inet_csk(sk);
  
        tcp_set_state(sk, TCP_ESTABLISHED);
+       icsk->icsk_ack.lrcvtime = tcp_time_stamp;
  
        if (skb != NULL) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@@ -5570,10 -5536,9 +5571,9 @@@ static int tcp_rcv_synsent_state_proces
                         * to stand against the temptation 8)     --ANK
                         */
                        inet_csk_schedule_ack(sk);
-                       icsk->icsk_ack.lrcvtime = tcp_time_stamp;
                        tcp_enter_quickack_mode(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 -                                                TCP_DELACK_MAX, TCP_RTO_MAX);
 +                                                TCP_DELACK_MAX, sysctl_tcp_rto_max);
  
  discard:
                        __kfree_skb(skb);
diff --combined net/ipv4/tcp_ipv4.c
index 36fcc193d7cf8800cba4ee5d52bd6311113728f4,270840f5ee013e9482cbafd81c0a54068328c5d7..491a6a7b50aae969fae3f834c92fa22caf811553
@@@ -233,7 -233,7 +233,7 @@@ int tcp_v4_connect(struct sock *sk, str
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
 -
 +        printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
@@@ -270,10 -270,13 +270,13 @@@ EXPORT_SYMBOL(tcp_v4_connect)
   */
  void tcp_v4_mtu_reduced(struct sock *sk)
  {
-       struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
-       u32 mtu = tcp_sk(sk)->mtu_info;
+       struct dst_entry *dst;
+       u32 mtu;
  
+       if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+               return;
+       mtu = tcp_sk(sk)->mtu_info;
        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;
@@@ -447,7 -450,7 +450,7 @@@ void tcp_v4_err(struct sk_buff *icmp_sk
  
                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                                                remaining, TCP_RTO_MAX);
 +                                                remaining, sysctl_tcp_rto_max);
                } else {
                        /* RTO revert clocked out retransmission.
                         * Will retransmit now */
@@@ -1430,7 -1433,7 +1433,7 @@@ static int tcp_v4_conn_req_fastopen(str
         * because it's been added to the accept queue directly.
         */
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
 -          TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 +          TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
  
        /* Add the child socket directly into the accept queue */
        inet_csk_reqsk_queue_add(sk, req, child);
@@@ -1534,7 -1537,6 +1537,7 @@@ int tcp_v4_conn_request(struct sock *sk
        ireq->rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
 +      ireq->ir_mark = inet_request_mark(sk, skb);
  
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
@@@ -1956,7 -1958,7 +1959,7 @@@ bool tcp_prequeue(struct sock *sk, stru
                if (!inet_csk_ack_scheduled(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  (3 * tcp_rto_min(sk)) / 4,
 -                                                TCP_RTO_MAX);
 +                                                sysctl_tcp_rto_max);
        }
        return true;
  }
@@@ -2187,7 -2189,6 +2190,7 @@@ static int tcp_v4_init_sock(struct soc
        struct inet_connection_sock *icsk = inet_csk(sk);
  
        tcp_init_sock(sk);
 +        icsk->icsk_MMSRB = 0;
  
        icsk->icsk_af_ops = &ipv4_specific;
  
@@@ -2243,115 -2244,6 +2246,115 @@@ void tcp_v4_destroy_sock(struct sock *s
  }
  EXPORT_SYMBOL(tcp_v4_destroy_sock);
  
 +void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
 +{
 +      unsigned int bucket;
 +      uid_t skuid = (uid_t)(uid_e.appuid);
 +      struct inet_connection_sock *icsk = NULL;
 +
 +      for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
 +              struct hlist_nulls_node *node;
 +              struct sock *sk;
 +              spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
 +
 +              spin_lock_bh(lock);
 +              sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
 +                      if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
 +                              continue;
 +                      if (sock_flag(sk, SOCK_DEAD))
 +                              continue;
 +                      if (!sk->sk_socket)
 +                              continue;
 +                      if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
 +                              continue;
 +                      printk("[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!",
 +                             SOCK_INODE(sk->sk_socket)->i_uid);
 +
 +                      sock_hold(sk);
 +                      spin_unlock_bh(lock);
 +
 +                      local_bh_disable();
 +                      bh_lock_sock(sk);
 +
 +                      /* Shorten the retransmit timer and flag the socket. */
 +                      icsk = inet_csk(sk);
 +                      printk("[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");
 +                      sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
 +                      icsk->icsk_rto = sysctl_tcp_rto_min * 30;
 +                      icsk->icsk_MMSRB = 1;
 +
 +                      bh_unlock_sock(sk);
 +                      local_bh_enable();
 +                      spin_lock_bh(lock);
 +                      sock_put(sk);
 +              }
 +              spin_unlock_bh(lock);
 +      }
 +}
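 +
 +/* A socket left with icsk_MMSRB set keeps the shortened timeout until
 + * tcp_enter_loss() (tcp_input.c above) rescales snd_cwnd and clears the
 + * flag; tcp_write_timer_handler() (tcp_timer.c below) also bypasses its
 + * early-timeout check while the flag is set.
 + */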
 +
 +
 +/*
 + * tcp_v4_reset_connections_by_uid - destroy all sockets owned by a specific uid
 + */
 +void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
 +{
 +      unsigned int bucket;
 +      uid_t skuid = (uid_t)(uid_e.appuid);
 +
 +      for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
 +              struct hlist_nulls_node *node;
 +              struct sock *sk;
 +              spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
 +
 +restart:
 +              spin_lock_bh(lock);
 +              sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
 +                      if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
 +                              continue;
 +                      if (sock_flag(sk, SOCK_DEAD))
 +                              continue;
 +                      if (!sk->sk_socket)
 +                              continue;
 +                      if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
 +                              continue;
 +                      printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!",
 +                             SOCK_INODE(sk->sk_socket)->i_uid);
 +
 +                      sock_hold(sk);
 +                      spin_unlock_bh(lock);
 +
 +                      local_bh_disable();
 +                      bh_lock_sock(sk);
 +                      sk->sk_err = uid_e.errNum;
 +                      printk(KERN_INFO "SIOCKILLSOCK set sk_err=%d\n", sk->sk_err);
 +                      sk->sk_error_report(sk);
 +
 +                      tcp_done(sk);
 +                      bh_unlock_sock(sk);
 +                      local_bh_enable();
 +                      sock_put(sk);
 +
 +                      /* The chain may have changed while unlocked; rescan. */
 +                      goto restart;
 +              }
 +              spin_unlock_bh(lock);
 +      }
 +}
 +
 +
  #ifdef CONFIG_PROC_FS
  /* Proc filesystem TCP sock list dumping. */
  
diff --combined net/ipv4/tcp_minisocks.c
index c2b195b85b01ee328d626487865625791b84bb36,914a55db80311e1dd61f7c52e515b60a6566e3ef..61f04ef82450d70924f0f65019dcc88490038d08
@@@ -405,6 -405,7 +405,7 @@@ struct sock *tcp_create_openreq_child(s
                newtp->srtt = 0;
                newtp->mdev = TCP_TIMEOUT_INIT;
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+               newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
  
                newtp->packets_out = 0;
                newtp->retrans_out = 0;
@@@ -556,8 -557,8 +557,8 @@@ struct sock *tcp_check_req(struct sock 
                 * the idea of fast retransmit in recovery.
                 */
                if (!inet_rtx_syn_ack(sk, req))
 -                      req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
 -                                         TCP_RTO_MAX) + jiffies;
 +                      req->expires = min_t(unsigned int, TCP_TIMEOUT_INIT << req->num_timeout,
 +                                         sysctl_tcp_rto_max) + jiffies;
                return NULL;
        }
  
diff --combined net/ipv4/tcp_output.c
index cfd61259d7cf448a311e313f87ec6fe3241c304a,8729a934124f82408555670fbc57113f6b8ed9ed..77e6a9d36023ef9e97dc9536cb61d83b9da6d9a4
@@@ -232,13 -232,14 +232,13 @@@ void tcp_select_initial_window(int __sp
        }
  
        /* Set initial window to a value enough for senders starting with
 -       * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
 +       * initial congestion window of sysctl_tcp_default_init_rwnd. Place
         * a limit on the initial window when mss is larger than 1460.
         */
        if (mss > (1 << *rcv_wscale)) {
 -              int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
 +              int init_cwnd = sysctl_tcp_default_init_rwnd;
                if (mss > 1460)
 -                      init_cwnd =
 -                      max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
 +                      init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
                /* when initializing use the value from init_rcv_wnd
                 * rather than the default from above
                 */
@@@ -1996,7 -1997,7 +1996,7 @@@ bool tcp_schedule_loss_probe(struct soc
        }
  
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
 -                                TCP_RTO_MAX);
 +                                sysctl_tcp_rto_max);
        return true;
  }
  
@@@ -2047,7 -2048,7 +2047,7 @@@ void tcp_send_loss_probe(struct sock *s
  rearm_timer:
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  inet_csk(sk)->icsk_rto,
 -                                TCP_RTO_MAX);
 +                                sysctl_tcp_rto_max);
  
        if (likely(!err))
                NET_INC_STATS_BH(sock_net(sk),
@@@ -2153,9 -2154,11 +2153,11 @@@ u32 __tcp_select_window(struct sock *sk
        int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
        int window;
  
-       if (mss > full_space)
+       if (unlikely(mss > full_space)) {
                mss = full_space;
+               if (mss <= 0)
+                       return 0;
+       }
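+       /* Bailing out with a zero window here keeps the later
+        * free_space/mss arithmetic in this function away from a zero or
+        * negative mss (a divide-by-zero in the stock code).
+        */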
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
  
@@@ -2570,7 -2573,7 +2572,7 @@@ begin_fwd
                if (skb == tcp_write_queue_head(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  inet_csk(sk)->icsk_rto,
 -                                                TCP_RTO_MAX);
 +                                                sysctl_tcp_rto_max);
        }
  }
  
@@@ -3019,7 -3022,7 +3021,7 @@@ int tcp_connect(struct sock *sk
  
        /* Timer for repeating the SYN until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                                inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 +                                inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
        return 0;
  }
  EXPORT_SYMBOL(tcp_connect);
@@@ -3098,7 -3101,7 +3100,7 @@@ void tcp_send_ack(struct sock *sk
                inet_csk_schedule_ack(sk);
                inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 -                                        TCP_DELACK_MAX, TCP_RTO_MAX);
 +                                        TCP_DELACK_MAX, sysctl_tcp_rto_max);
                return;
        }
  
@@@ -3218,8 -3221,8 +3220,8 @@@ void tcp_send_probe0(struct sock *sk
                        icsk->icsk_backoff++;
                icsk->icsk_probes_out++;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 -                                        min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
 -                                        TCP_RTO_MAX);
 +                                        min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
 +                                        sysctl_tcp_rto_max);
        } else {
                /* If packet was not sent due to local congestion,
                 * do not backoff and do not remember icsk_probes_out.
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          min(icsk->icsk_rto << icsk->icsk_backoff,
                                              TCP_RESOURCE_PROBE_INTERVAL),
 -                                        TCP_RTO_MAX);
 +                                        sysctl_tcp_rto_max);
        }
  }
diff --combined net/ipv4/tcp_timer.c
index 98cd820c1207825a516b30e24cb796daad3c9c4b,722367a6d8175166d338b487739022e4785c856e..8cb86fa84e2d448d61086a0fe4f7a9b7ccfe98c6
@@@ -31,10 -31,6 +31,10 @@@ int sysctl_tcp_retries1 __read_mostly 
  int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
  int sysctl_tcp_orphan_retries __read_mostly;
  int sysctl_tcp_thin_linear_timeouts __read_mostly;
 +int sysctl_tcp_rto_min __read_mostly = TCP_RTO_MIN;
 +EXPORT_SYMBOL(sysctl_tcp_rto_min);
 +int sysctl_tcp_rto_max __read_mostly = TCP_RTO_MAX;
 +EXPORT_SYMBOL(sysctl_tcp_rto_max);
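 +/* Both knobs are in jiffies; the defaults keep stock behaviour:
 + * TCP_RTO_MIN is HZ/5 (200 ms) and TCP_RTO_MAX is 120*HZ (two minutes).
 + */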
  
  static void tcp_write_err(struct sock *sk)
  {
@@@ -63,7 -59,7 +63,7 @@@ static int tcp_out_of_resources(struct 
  
        /* If peer does not open window for long time, or did not transmit
         * anything for long time, penalize it. */
 -      if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
 +      if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*sysctl_tcp_rto_max || !do_reset)
                shift++;
  
        /* If some dubious ICMP arrived, penalize even more. */
@@@ -134,7 -130,7 +134,7 @@@ static bool retransmits_timed_out(struc
                                  bool syn_set)
  {
        unsigned int linear_backoff_thresh, start_ts;
 -      unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
 +      unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : sysctl_tcp_rto_min;
  
        if (!inet_csk(sk)->icsk_retransmits)
                return false;
                start_ts = tcp_sk(sk)->retrans_stamp;
  
        if (likely(timeout == 0)) {
 -              linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 +              linear_backoff_thresh = ilog2(sysctl_tcp_rto_max/rto_base);
  
                if (boundary <= linear_backoff_thresh)
                        timeout = ((2 << boundary) - 1) * rto_base;
                else
                        timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 -                              (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 +                              (boundary - linear_backoff_thresh) * sysctl_tcp_rto_max;
        }
        return (tcp_time_stamp - start_ts) >= timeout;
  }
@@@ -178,7 -174,7 +178,7 @@@ static int tcp_write_timeout(struct soc
  
                retry_until = sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
 -                      const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
 +                      const int alive = (icsk->icsk_rto < sysctl_tcp_rto_max);
  
                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
@@@ -205,7 -201,8 +205,8 @@@ void tcp_delack_timer_handler(struct so
  
        sk_mem_reclaim_partial(sk);
  
-       if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+       if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+           !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;
  
        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@@ -292,7 -289,7 +293,7 @@@ static void tcp_probe_timer(struct soc
        max_probes = sysctl_tcp_retries2;
  
        if (sock_flag(sk, SOCK_DEAD)) {
 -              const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
 +              const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < sysctl_tcp_rto_max);
  
                max_probes = tcp_orphan_retries(sk, alive);
  
@@@ -334,7 -331,7 +335,7 @@@ static void tcp_fastopen_synack_timer(s
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                        TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 +                        TCP_TIMEOUT_INIT << req->num_timeout, sysctl_tcp_rto_max);
  }
  
  /*
@@@ -385,7 -382,7 +386,7 @@@ void tcp_retransmit_timer(struct sock *
                                       tp->snd_una, tp->snd_nxt);
                }
  #endif
 -              if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
 +              if (tcp_time_stamp - tp->rcv_tstamp > sysctl_tcp_rto_max) {
                        tcp_write_err(sk);
                        goto out;
                }
                NET_INC_STATS_BH(sock_net(sk), mib_idx);
        }
  
 +      if (icsk->icsk_MMSRB == 1) {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk(KERN_DEBUG "[mtk_net][mmspb] tcp_retransmit_timer enter loss\n");
 +#endif
 +      }
        tcp_enter_loss(sk, 0);
  
        if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
                        icsk->icsk_retransmits = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
 -                                        TCP_RTO_MAX);
 +                                        sysctl_tcp_rto_max);
                goto out;
        }
  
@@@ -473,12 -464,12 +474,12 @@@ out_reset_timer
            tcp_stream_is_thin(tp) &&
            icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
                icsk->icsk_backoff = 0;
 -              icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
 +              icsk->icsk_rto = min_t(unsigned int, __tcp_set_rto(tp), sysctl_tcp_rto_max);
        } else {
                /* Use normal (exponential) backoff */
 -              icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
 +              icsk->icsk_rto = min_t(unsigned int, icsk->icsk_rto << 1, sysctl_tcp_rto_max);
        }
 -      inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
 +      inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, sysctl_tcp_rto_max);
        if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
                __sk_dst_reset(sk);
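
One detail worth noting in the out_reset_timer hunk above: the kernel's min() rejects mixed-type comparisons at compile time, so once the cap becomes an int-typed sysctl rather than the TCP_RTO_MAX constant, the comparison against the unsigned RTO has to go through min_t(unsigned int, ...). The capped doubling itself reduces to this (a hedged restatement, with cap_ms standing in for sysctl_tcp_rto_max):

    /* Exponential backoff with an upper clamp, mirroring the else branch
     * above; the thin-stream branch instead resets the backoff and
     * recomputes the RTO from scratch. */
    static unsigned int next_rto(unsigned int rto_ms, unsigned int cap_ms)
    {
        rto_ms <<= 1;                 /* 200, 400, 800, ... */
        if (rto_ms > cap_ms)
            rto_ms = cap_ms;
        return rto_ms;
    }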
  
@@@ -490,15 -481,15 +491,16 @@@ void tcp_write_timer_handler(struct soc
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event;
  
-       if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+       if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+           !icsk->icsk_pending)
                goto out;
 -
 +	if (icsk->icsk_MMSRB != 1) {
        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
                goto out;
        }
 -
 +	}
        event = icsk->icsk_pending;
  
        switch (event) {
@@@ -530,10 -521,6 +532,10 @@@ static void tcp_write_timer(unsigned lo
        if (!sock_owned_by_user(sk)) {
                tcp_write_timer_handler(sk);
        } else {
 +
 +		/* if (icsk->icsk_MMSRB == 1)
 +		 *	printk("[mmspb] tcp_write_timer user owner sock\n");
 +		 */
 +
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
                        sock_hold(sk);
  static void tcp_synack_timer(struct sock *sk)
  {
        inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
 -                                 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 +                                 TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
  }
  
  void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
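
Both handler fixes above replace an equality test against TCP_CLOSE with a mask test so that TCP_LISTEN bails out as well. The idiom works because the TCP states are small integers, so a set of states collapses into one bitmask. A self-contained illustration (the enum values mirror the kernel's TCP_CLOSE = 7 and TCP_LISTEN = 10, but the names here are local stand-ins):

    /* Hedged illustration of the (1 << state) & mask test used above. */
    enum state { ST_ESTABLISHED = 1, ST_SYN_SENT, ST_CLOSE = 7, ST_LISTEN = 10 };

    #define SF(s) (1u << (s))

    static int timer_should_bail(enum state s)
    {
        /* Same result as: s == ST_CLOSE || s == ST_LISTEN */
        return (SF(s) & (SF(ST_CLOSE) | SF(ST_LISTEN))) != 0;
    }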
diff --combined net/ipv6/addrconf.c
index 40e6a85eb069b4c484e42eeb5206fee2ce048f94,9c4aa2e22448c5aa86ea00327e594b8c6cb3a83a..eecfc43510c0c5878c1bcfeea74c2440e645c47f
@@@ -175,7 -175,7 +175,7 @@@ static struct ipv6_devconf ipv6_devcon
        .forwarding             = 0,
        .hop_limit              = IPV6_DEFAULT_HOPLIMIT,
        .mtu6                   = IPV6_MIN_MTU,
        .accept_ra              = 1,
        .accept_redirects       = 1,
        .autoconf               = 1,
        .force_mld_version      = 0,
        .rtr_solicit_interval   = RTR_SOLICITATION_INTERVAL,
        .rtr_solicit_delay      = MAX_RTR_SOLICITATION_DELAY,
  #ifdef CONFIG_IPV6_PRIVACY
 -      .use_tempaddr           = 0,
 +      .use_tempaddr           = 1,
        .temp_valid_lft         = TEMP_VALID_LIFETIME,
        .temp_prefered_lft      = TEMP_PREFERRED_LIFETIME,
        .regen_max_retry        = REGEN_MAX_RETRY,
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
        .accept_ra_pinfo        = 1,
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI
 +	.ra_info_flag		= 0,
 +#endif
  #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
        .rtr_probe_interval     = 60 * HZ,
        .accept_ra_rt_info_max_plen = 0,
  #endif
  #endif
 +      .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@@ -222,7 -218,7 +222,7 @@@ static struct ipv6_devconf ipv6_devconf
        .rtr_solicit_interval   = RTR_SOLICITATION_INTERVAL,
        .rtr_solicit_delay      = MAX_RTR_SOLICITATION_DELAY,
  #ifdef CONFIG_IPV6_PRIVACY
 -      .use_tempaddr           = 0,
 +      .use_tempaddr           = 1,
        .temp_valid_lft         = TEMP_VALID_LIFETIME,
        .temp_prefered_lft      = TEMP_PREFERRED_LIFETIME,
        .regen_max_retry        = REGEN_MAX_RETRY,
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
        .accept_ra_pinfo        = 1,
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI
 +	.ra_info_flag		= 0,
 +#endif
  #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
        .rtr_probe_interval     = 60 * HZ,
        .accept_ra_rt_info_max_plen = 0,
  #endif
  #endif
 +      .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@@ -765,10 -757,9 +765,10 @@@ static int addrconf_fixup_forwarding(st
        } else if ((!newf) ^ (!old))
                dev_forward_change((struct inet6_dev *)table->extra1);
        rtnl_unlock();
        if (newf)
                rt6_purge_dflt_routers(net);
 +
        return 1;
  }
  #endif
@@@ -1861,10 -1852,6 +1861,10 @@@ static int addrconf_ifid_gre(u8 *eui, s
  
  static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
  {
 +	/* MTK_NET_CHANGES */
 +	if (strncmp(dev->name, "ccmni", 2) == 0)
 +		return -1;
 +
        switch (dev->type) {
        case ARPHRD_ETHER:
        case ARPHRD_FDDI:
@@@ -1972,31 -1959,6 +1972,31 @@@ static void  __ipv6_try_regen_rndid(str
  }
  #endif
  
 +u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
 +{
 +      /* Determines into what table to put autoconf PIO/RIO/default routes
 +       * learned on this device.
 +       *
 +       * - If 0, use the same table for every device. This puts routes into
 +       *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
 +       *   (but note that these three are currently all equal to
 +       *   RT6_TABLE_MAIN).
 +       * - If > 0, use the specified table.
 +       * - If < 0, put routes into table dev->ifindex + (-rt_table).
 +       */
 +      struct inet6_dev *idev = in6_dev_get(dev);
 +      u32 table;
 +      int sysctl = idev->cnf.accept_ra_rt_table;
 +      if (sysctl == 0) {
 +              table = default_table;
 +      } else if (sysctl > 0) {
 +              table = (u32) sysctl;
 +      } else {
 +              table = (unsigned) dev->ifindex + (-sysctl);
 +      }
 +      in6_dev_put(idev);
 +      return table;
 +}
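
To make the helper's three cases concrete, here is a hedged standalone restatement with the sysctl value and ifindex passed in directly (the in6_dev refcounting above is elided; pick_rt_table is an illustrative name, not part of the patch):

    static unsigned int pick_rt_table(int sysctl, int ifindex,
                                      unsigned int default_table)
    {
        if (sysctl == 0)
            return default_table;         /* legacy behaviour: shared table */
        if (sysctl > 0)
            return (unsigned int)sysctl;  /* one fixed table for every device */
        return (unsigned int)ifindex + (unsigned int)(-sysctl); /* per-device */
    }

So with accept_ra_rt_table = -1000, RA routes learned on ifindex 3 land in table 1003, the kind of per-interface layout policy-routing setups rely on.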
 +
  /*
   *    Add prefix route.
   */
@@@ -2006,7 -1968,7 +2006,7 @@@ addrconf_prefix_route(struct in6_addr *
                      unsigned long expires, u32 flags)
  {
        struct fib6_config cfg = {
 -              .fc_table = RT6_TABLE_PREFIX,
 +              .fc_table = addrconf_rt_table(dev, RT6_TABLE_PREFIX),
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_expires = expires,
@@@ -2040,8 -2002,7 +2040,8 @@@ static struct rt6_info *addrconf_get_pr
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
  
 -      table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
 +      table = fib6_get_table(dev_net(dev),
 +                             addrconf_rt_table(dev, RT6_TABLE_PREFIX));
        if (table == NULL)
                return NULL;
  
@@@ -4326,7 -4287,6 +4326,7 @@@ static inline void ipv6_store_devconf(s
        array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
  #endif
  #endif
 +      array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
        array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
        array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
  #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
        array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
        array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI
 +	array[DEVCONF_RA_INFO_FLAG] = cnf->ra_info_flag;
 +#endif
  }
  
  static inline size_t inet6_ifla6_size(void)
@@@ -4811,8 -4768,7 +4811,7 @@@ static void addrconf_disable_change(str
        struct net_device *dev;
        struct inet6_dev *idev;
  
-       rcu_read_lock();
-       for_each_netdev_rcu(net, dev) {
+       for_each_netdev(net, dev) {
                idev = __in6_dev_get(dev);
                if (idev) {
                        int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
                                dev_disable_change(idev);
                }
        }
-       rcu_read_unlock();
  }
  
  static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
@@@ -5044,13 -4999,6 +5042,13 @@@ static struct addrconf_sysctl_tabl
                },
  #endif
  #endif
 +              {
 +                      .procname       = "accept_ra_rt_table",
 +                      .data           = &ipv6_devconf.accept_ra_rt_table,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
 +              },
                {
                        .procname       = "proxy_ndp",
                        .data           = &ipv6_devconf.proxy_ndp,
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec
                },
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI
 +		{
 +			.procname	= "ra_info_flag",
 +			.data		= &ipv6_devconf.ra_info_flag,
 +			.maxlen		= sizeof(int),
 +			.mode		= 0644,
 +			.proc_handler	= proc_dointvec
 +		},
 +#endif
                {
                        /* sentinel */
                }
diff --combined net/ipv6/raw.c
index 58d92339220fe2b34d711d6500a192cb90c4892e,c7ce2be09d90d36bcbe52e510772c2ae6143bc6c..3eb62d776fc9534f71e31bdb6120fa58496aa19a
@@@ -762,7 -762,6 +762,7 @@@ static int rawv6_sendmsg(struct kiocb *
        memset(&fl6, 0, sizeof(fl6));
  
        fl6.flowi6_mark = sk->sk_mark;
 +      fl6.flowi6_uid = sock_i_uid(sk);
  
        if (sin6) {
                if (addr_len < SIN6_LEN_RFC2133)
@@@ -1134,7 -1133,7 +1134,7 @@@ static int rawv6_ioctl(struct sock *sk
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL)
-                       amount = skb->tail - skb->transport_header;
+                       amount = skb->len;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
        }
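
The rawv6_ioctl change reports skb->len instead of skb->tail - skb->transport_header; for raw sockets the transport-header offset of a queued skb is not a reliable measure of the readable payload. From userspace this is the SIOCINQ/FIONREAD path. A minimal hedged consumer (creating the raw socket typically needs CAP_NET_RAW):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
        int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
        int pending = 0;

        if (fd < 0) {
            perror("socket");   /* typically needs CAP_NET_RAW */
            return 1;
        }
        if (ioctl(fd, FIONREAD, &pending) == 0) /* FIONREAD == SIOCINQ */
            printf("next datagram: %d bytes\n", pending);
        return 0;
    }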
diff --combined net/ipv6/route.c
index 9f4b790cc2f02538b4506c4b5275633932e29c87,244892ce560e2e5ea546e0bc562cfe70007e03e0..3cd034c4b854fe610a537b31a9e2301803281c16
@@@ -93,12 -93,13 +93,12 @@@ static void                rt6_do_redirect(struct dst
                                        struct sk_buff *skb);
  
  #ifdef CONFIG_IPV6_ROUTE_INFO
 -static struct rt6_info *rt6_add_route_info(struct net *net,
 +static struct rt6_info *rt6_add_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
 -                                         const struct in6_addr *gwaddr, int ifindex,
 -                                         unsigned int pref);
 -static struct rt6_info *rt6_get_route_info(struct net *net,
 +                                         const struct in6_addr *gwaddr, unsigned int pref);
 +static struct rt6_info *rt6_get_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
 -                                         const struct in6_addr *gwaddr, int ifindex);
 +                                         const struct in6_addr *gwaddr);
  #endif
  
  static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@@ -684,6 -685,7 +684,6 @@@ static struct rt6_info *rt6_select(stru
  int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
  {
 -      struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(gwaddr, dev);
        else
 -              rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
 -                                      gwaddr, dev->ifindex);
 +              rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
  
        if (rt && !lifetime) {
                ip6_del_rt(rt);
        }
  
        if (!rt && lifetime)
 -              rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
 -                                      pref);
 +              rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
        else if (rt)
                rt->rt6i_flags = RTF_ROUTEINFO |
                                 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@@ -1146,7 -1150,7 +1146,7 @@@ static void ip6_rt_update_pmtu(struct d
  }
  
  void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 -                   int oif, u32 mark)
 +                   int oif, u32 mark, kuid_t uid)
  {
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
  
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
 -      fl6.flowi6_mark = mark;
 +      fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
        fl6.flowi6_flags = 0;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
 +      fl6.flowi6_uid = uid;
  
        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
@@@ -1171,7 -1174,7 +1171,7 @@@ EXPORT_SYMBOL_GPL(ip6_update_pmtu)
  void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
  {
        ip6_update_pmtu(skb, sock_net(sk), mtu,
 -                      sk->sk_bound_dev_if, sk->sk_mark);
 +                      sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
  }
  EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
  
@@@ -1673,6 -1676,8 +1673,8 @@@ static int ip6_route_del(struct fib6_co
                                continue;
                        if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
                                continue;
+                       if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
+                               continue;
                        dst_hold(&rt->dst);
                        read_unlock_bh(&table->tb6_lock);
  
@@@ -1844,16 -1849,15 +1846,16 @@@ static struct rt6_info *ip6_rt_copy(str
  }
  
  #ifdef CONFIG_IPV6_ROUTE_INFO
 -static struct rt6_info *rt6_get_route_info(struct net *net,
 +static struct rt6_info *rt6_get_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
 -                                         const struct in6_addr *gwaddr, int ifindex)
 +                                         const struct in6_addr *gwaddr)
  {
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
  
 -      table = fib6_get_table(net, RT6_TABLE_INFO);
 +      table = fib6_get_table(dev_net(dev),
 +                             addrconf_rt_table(dev, RT6_TABLE_INFO));
        if (!table)
                return NULL;
  
                goto out;
  
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
 -              if (rt->dst.dev->ifindex != ifindex)
 +              if (rt->dst.dev->ifindex != dev->ifindex)
                        continue;
                if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
                        continue;
        return rt;
  }
  
 -static struct rt6_info *rt6_add_route_info(struct net *net,
 +static struct rt6_info *rt6_add_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
 -                                         const struct in6_addr *gwaddr, int ifindex,
 -                                         unsigned int pref)
 +                                         const struct in6_addr *gwaddr, unsigned int pref)
  {
        struct fib6_config cfg = {
 -              .fc_table       = RT6_TABLE_INFO,
 +              .fc_table       = addrconf_rt_table(dev, RT6_TABLE_INFO),
                .fc_metric      = IP6_RT_PRIO_USER,
 -              .fc_ifindex     = ifindex,
 +              .fc_ifindex     = dev->ifindex,
                .fc_dst_len     = prefixlen,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
                                  RTF_UP | RTF_PREF(pref),
                .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
 -              .fc_nlinfo.nl_net = net,
 +              .fc_nlinfo.nl_net = dev_net(dev),
        };
  
        cfg.fc_dst = *prefix;
  
        ip6_route_add(&cfg);
  
 -      return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
 +      return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
  }
  #endif
  
@@@ -1911,8 -1916,7 +1913,8 @@@ struct rt6_info *rt6_get_dflt_router(co
        struct rt6_info *rt;
        struct fib6_table *table;
  
 -      table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
 +      table = fib6_get_table(dev_net(dev),
 +                             addrconf_rt_table(dev, RT6_TABLE_MAIN));
        if (!table)
                return NULL;
  
@@@ -1934,7 -1938,7 +1936,7 @@@ struct rt6_info *rt6_add_dflt_router(co
                                     unsigned int pref)
  {
        struct fib6_config cfg = {
 -              .fc_table       = RT6_TABLE_DFLT,
 +              .fc_table       = addrconf_rt_table(dev, RT6_TABLE_DFLT),
                .fc_metric      = IP6_RT_PRIO_USER,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
        return rt6_get_dflt_router(gwaddr, dev);
  }
  
 -void rt6_purge_dflt_routers(struct net *net)
 -{
 -      struct rt6_info *rt;
 -      struct fib6_table *table;
  
 -      /* NOTE: Keep consistent with rt6_get_dflt_router */
 -      table = fib6_get_table(net, RT6_TABLE_DFLT);
 -      if (!table)
 -              return;
 +int rt6_addrconf_purge(struct rt6_info *rt, void *arg)
 +{
 +      if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
 +          (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
 +              return -1;
 +      return 0;
 +}
  
 -restart:
 -      read_lock_bh(&table->tb6_lock);
 -      for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
 -              if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
 -                  (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
 -                      dst_hold(&rt->dst);
 -                      read_unlock_bh(&table->tb6_lock);
 -                      ip6_del_rt(rt);
 -                      goto restart;
 -              }
 -      }
 -      read_unlock_bh(&table->tb6_lock);
 +void rt6_purge_dflt_routers(struct net *net)
 +{
 +      fib6_clean_all(net, rt6_addrconf_purge, 0, NULL);
  }
  
  static void rtmsg_to_fib6_config(struct net *net,
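
The purge rewrite above trades the drop-lock-and-restart table walk for fib6_clean_all() plus a predicate. Under that walker's contract, as rt6_addrconf_purge() uses it, a nonzero return asks the core to delete the entry and 0 keeps it. A hedged sketch of another predicate written against the same assumed contract (the expiry check is illustrative only, not part of this patch):

    /* Illustrative fib6_clean_all()-style predicate: delete expired
     * entries, keep everything else. Assumes the same callback contract
     * as rt6_addrconf_purge() above. */
    static int purge_expired(struct rt6_info *rt, void *arg)
    {
        unsigned long now = *(unsigned long *)arg;

        if ((rt->rt6i_flags & RTF_EXPIRES) && time_after(now, rt->dst.expires))
            return -1;      /* walker deletes this route */
        return 0;           /* keep */
    }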
@@@ -2246,7 -2261,6 +2248,7 @@@ static const struct nla_policy rtm_ipv6
        [RTA_PRIORITY]          = { .type = NLA_U32 },
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
 +      [RTA_UID]               = { .type = NLA_U32 },
  };
  
  static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@@ -2635,12 -2649,6 +2637,12 @@@ static int inet6_rtm_getroute(struct sk
        if (tb[RTA_OIF])
                oif = nla_get_u32(tb[RTA_OIF]);
  
 +      if (tb[RTA_UID])
 +              fl6.flowi6_uid = make_kuid(current_user_ns(),
 +                                         nla_get_u32(tb[RTA_UID]));
 +      else
 +              fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
 +
        if (iif) {
                struct net_device *dev;
                int flags = 0;
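
The getroute hunk threads a uid into the flow key: an explicit RTA_UID attribute wins; otherwise forwarded lookups (iif set) get INVALID_UID and local ones the caller's uid. Stripped of the netlink plumbing, the selection logic is the following hedged restatement (has_rta_uid and attr_uid stand in for tb[RTA_UID]):

    static kuid_t route_lookup_uid(bool has_rta_uid, u32 attr_uid, int iif)
    {
        if (has_rta_uid)
            return make_kuid(current_user_ns(), attr_uid);
        /* No attribute: no uid for forwarded lookups, caller's otherwise. */
        return iif ? INVALID_UID : current_uid();
    }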
diff --combined net/l2tp/l2tp_ip.c
index b69b762159ad0a55cfd5ebf9a9e7acf30e920078,1f65095c3217ed7b630d99f458f3cbf949d0f319..c44b3742ae36f8339d5e5dd7a54ced347ab6af74
@@@ -252,6 -252,8 +252,6 @@@ static int l2tp_ip_bind(struct sock *sk
        int ret;
        int chk_addr_ret;
  
 -      if (!sock_flag(sk, SOCK_ZAPPED))
 -              return -EINVAL;
        if (addr_len < sizeof(struct sockaddr_l2tpip))
                return -EINVAL;
        if (addr->l2tp_family != AF_INET)
        read_unlock_bh(&l2tp_ip_lock);
  
        lock_sock(sk);
 +      if (!sock_flag(sk, SOCK_ZAPPED))
 +              goto out;
 +
        if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
                goto out;
  
@@@ -383,7 -382,7 +383,7 @@@ static int l2tp_ip_backlog_recv(struct 
  drop:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
-       return -1;
+       return 0;
  }
  
  /* Userspace will call sendmsg() on the tunnel socket to send L2TP
diff --combined net/unix/af_unix.c
index 360954149888401df3834ac402c7fec28e554bd8,4b18115c0543cc4d29b7bd9bfffdf84993808815..a14c213ea4270f6b2795afd3af7a40f13feb0360
  #include <linux/mount.h>
  #include <net/checksum.h>
  #include <linux/security.h>
 +#include <linux/freezer.h>
 +
 +#include <linux/uio.h>
 +#include <linux/blkdev.h>
 +#include <linux/compat.h>
 +#include <linux/rtc.h>
 +#include <asm/kmap_types.h>
 +#include <linux/device.h>
  
  struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
  EXPORT_SYMBOL_GPL(unix_socket_table);
@@@ -144,17 -134,6 +144,17 @@@ static struct hlist_head *unix_sockets_
  
  #define UNIX_ABSTRACT(sk)     (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
  
 +
 +/* AEE (MTK exception-report) interface */
 +#define __UNIX_SOCKET_OUTPUT_BUF_SIZE__   3500
 +static struct proc_dir_entry *gunix_socket_track_aee_entry = NULL;
 +#define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
 +#define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
 +
 +static volatile unsigned int unix_sock_track_stop_flag = 0;
 +#define unix_peer(sk) (unix_sk(sk)->peer)
 +
  #ifdef CONFIG_SECURITY_NETWORK
  static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
  {
@@@ -187,7 -166,7 +187,7 @@@ static inline unsigned int unix_hash_fo
        return hash&(UNIX_HASH_SIZE-1);
  }
  
 -#define unix_peer(sk) (unix_sk(sk)->peer)
 +
  
  static inline int unix_our_peer(struct sock *sk, struct sock *osk)
  {
@@@ -497,9 -476,7 +497,9 @@@ static void unix_sock_destructor(struc
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
 -              printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
 +#ifdef CONFIG_MTK_NET_LOGGING
 +		printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
 +#endif
                return;
        }
  
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
 -#ifdef UNIX_REFCNT_DEBUG
 -      printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
 +#ifdef UNIX_REFCNT_DEBUG
 +      printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
                atomic_long_read(&unix_nr_socks));
 -#endif
 +#endif
  }
  
  static void unix_release_sock(struct sock *sk, int embrion)
@@@ -635,7 -612,6 +635,7 @@@ out_unlock
        unix_state_unlock(sk);
        put_pid(old_pid);
  out:
        return err;
  }
  
@@@ -1001,6 -977,7 +1001,7 @@@ static int unix_bind(struct socket *soc
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;
+       struct path path = { NULL, NULL };
  
        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
                goto out;
        addr_len = err;
  
+       if (sun_path[0]) {
+               umode_t mode = S_IFSOCK |
+                      (SOCK_INODE(sock)->i_mode & ~current_umask());
+               err = unix_mknod(sun_path, mode, &path);
+               if (err) {
+                       if (err == -EEXIST)
+                               err = -EADDRINUSE;
+                       goto out;
+               }
+       }
        err = mutex_lock_interruptible(&u->readlock);
        if (err)
-               goto out;
+               goto out_put;
  
        err = -EINVAL;
        if (u->addr)
        atomic_set(&addr->refcnt, 1);
  
        if (sun_path[0]) {
-               struct path path;      
-               umode_t mode = S_IFSOCK |
-                      (SOCK_INODE(sock)->i_mode & ~current_umask());
-               err = unix_mknod(sun_path, mode, &path);
-               if (err) {
-                       if (err == -EEXIST)
-                               err = -EADDRINUSE;
-                       unix_release_addr(addr);
-                       goto out_up;
-               }
                addr->hash = UNIX_HASH_SIZE;
                hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
                spin_lock(&unix_table_lock);
@@@ -1072,8 -1049,10 +1073,11 @@@ out_unlock
        spin_unlock(&unix_table_lock);
  out_up:
        mutex_unlock(&u->readlock);
+ out_put:
+       if (err)
+               path_put(&path);
  out:
        return err;
  }
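
The unix_bind() hunk above moves the filesystem-node creation ahead of the u->readlock acquisition and adds a path_put() on the error path, so the blocking, fs-lock-taking mknod no longer nests inside the socket lock. A compilable toy model of that ordering, with every name hypothetical (create_node, publish and drop_node merely stand in for unix_mknod(), the hash-table insert and path_put()):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t readlock = PTHREAD_MUTEX_INITIALIZER;

    static int  create_node(void) { return 0; }    /* stand-in for unix_mknod() */
    static void drop_node(void)   { }              /* stand-in for path_put()   */
    static bool publish(void)     { return true; } /* hash + table insert       */

    static int bind_node_first(void)
    {
        int err = create_node();            /* moved out from under the lock */
        if (err)
            return err == -EEXIST ? -EADDRINUSE : err;

        pthread_mutex_lock(&readlock);
        err = publish() ? 0 : -EINVAL;
        pthread_mutex_unlock(&readlock);

        if (err)
            drop_node();                    /* new error path */
        return err;
    }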
  
@@@ -1113,7 -1092,6 +1117,7 @@@ static int unix_dgram_connect(struct so
        int err;
  
        if (addr->sa_family != AF_UNSPEC) {
                err = unix_mkname(sunaddr, alen, &hash);
                if (err < 0)
                        goto out;
@@@ -1170,21 -1148,12 +1174,21 @@@ restart
                unix_peer(sk) = other;
                unix_state_double_unlock(sk, other);
        }
 +
 +#ifdef CONFIG_MTK_NET_LOGGING
 +	if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) &&
 +	    (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL)) {
 +		printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]:connect [%s] other[%lu]\n",
 +		       SOCK_INODE(sock)->i_ino, sunaddr->sun_path,
 +		       SOCK_INODE(other->sk_socket)->i_ino);
 +	}
 +#endif
 +
        return 0;
  
  out_unlock:
        unix_state_double_unlock(sk, other);
        sock_put(other);
  out:
        return err;
  }
  
@@@ -1367,17 -1336,8 +1371,17 @@@ restart
        __skb_queue_tail(&other->sk_receive_queue, skb);
        spin_unlock(&other->sk_receive_queue.lock);
        unix_state_unlock(other);
 +
 +#ifdef CONFIG_MTK_NET_LOGGING
 +	if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) &&
 +	    (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL)) {
 +		printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu ]: connect [%s] other[%lu] \n",
 +		       SOCK_INODE(sock)->i_ino, sunaddr->sun_path,
 +		       SOCK_INODE(other->sk_socket)->i_ino);
 +	}
 +#endif
 +
        other->sk_data_ready(other, 0);
        sock_put(other);
        return 0;
  
  out_unlock:
@@@ -1390,7 -1350,6 +1394,7 @@@ out
                unix_release_sock(newsk, 0);
        if (other)
                sock_put(other);
        return err;
  }
  
@@@ -1442,7 -1401,7 +1446,7 @@@ static int unix_accept(struct socket *s
        /* If socket state is TCP_LISTEN it cannot change (for now...),
         * so that no locks are necessary.
         */
        skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
        if (!skb) {
                /* This means receive shutdown. */
        unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
        return 0;
  
  out:
        return err;
  }
  
@@@ -1638,11 -1595,7 +1642,7 @@@ static int unix_dgram_sendmsg(struct ki
        int max_level;
        int data_len = 0;
        int sk_locked;
- <<<<<<< HEAD
-        
- =======
  
- >>>>>>> v3.10.95
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
        wait_for_unix_gc();
@@@ -1734,13 -1687,9 +1734,9 @@@ restart_locked
                sock_put(other);
  
                if (!sk_locked)
- <<<<<<< HEAD
-               unix_state_lock(sk);
- =======
                        unix_state_lock(sk);
  
                err = 0;
- >>>>>>> v3.10.95
                if (unix_peer(sk) == other) {
                        unix_peer(sk) = NULL;
                        unix_dgram_peer_wake_disconnect_wakeup(sk, other);
                        goto out_unlock;
        }
  
- <<<<<<< HEAD
 -      if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
 +      /* other == sk && unix_peer(other) != sk if
 +       * - unix_peer(sk) == NULL, destination address bound to sk
 +       * - unix_peer(sk) == sk by time of get but disconnected before lock
 +       */
 +      if (other != sk &&
 +          unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
- =======
-       if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
- >>>>>>> v3.10.95
                if (timeo) {
                        timeo = unix_wait_for_peer(other, timeo);
  
        other->sk_data_ready(other, len);
        sock_put(other);
        scm_destroy(siocb->scm);
        return len;
  
  out_unlock:
@@@ -1834,7 -1773,6 +1826,7 @@@ out
        if (other)
                sock_put(other);
        scm_destroy(siocb->scm);
        return err;
  }
  
@@@ -1854,7 -1792,6 +1846,7 @@@ static int unix_stream_sendmsg(struct k
  
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
        wait_for_unix_gc();
        err = scm_send(sock, msg, siocb->scm, false);
        if (err < 0)
  
                skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
                                          &err);
  
                if (skb == NULL)
                        goto out_err;
  
                if (sock_flag(other, SOCK_DEAD) ||
                    (other->sk_shutdown & RCV_SHUTDOWN))
 +		{
 +#ifdef CONFIG_MTK_NET_LOGGING
 +			if (other->sk_socket) {
 +				if (sk->sk_socket)
 +					printk(KERN_INFO " [mtk_net][unix]: sendmsg[%lu:%lu]:peer close\n",
 +					       SOCK_INODE(sk->sk_socket)->i_ino,
 +					       SOCK_INODE(other->sk_socket)->i_ino);
 +				else
 +					printk(KERN_INFO " [mtk_net][unix]: sendmsg[null:%lu]:peer close\n",
 +					       SOCK_INODE(other->sk_socket)->i_ino);
 +			} else {
 +				printk(KERN_INFO " [mtk_net][unix]: sendmsg:peer close \n");
 +			}
 +#endif
 +              }
  
                maybe_add_creds(skb, sock, other);
                skb_queue_tail(&other->sk_receive_queue, skb);
@@@ -1984,7 -1894,6 +1976,7 @@@ pipe_err
  out_err:
        scm_destroy(siocb->scm);
        siocb->scm = NULL;
        return sent ? : err;
  }
  
@@@ -2126,7 -2035,6 +2118,7 @@@ out_free
  out_unlock:
        mutex_unlock(&u->readlock);
  out:
        return err;
  }
  
@@@ -2152,7 -2060,7 +2144,7 @@@ static long unix_stream_data_wait(struc
  
                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                unix_state_unlock(sk);
 -              timeo = schedule_timeout(timeo);
 +              timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
  
                if (sock_flag(sk, SOCK_DEAD))
@@@ -2182,7 -2090,6 +2174,7 @@@ static int unix_stream_recvmsg(struct k
        int err = 0;
        long timeo;
        int skip;
 +	struct sock *other = unix_peer(sk);
  
        err = -EINVAL;
        if (sk->sk_state != TCP_ESTABLISHED)
@@@ -2230,27 -2137,8 +2222,27 @@@ again
                        if (err)
                                goto unlock;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
 +			{
 +#ifdef CONFIG_MTK_NET_LOGGING
 +				if (sk && sk->sk_socket) {
 +					if (other && other->sk_socket)
 +						printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to peer shutdown  \n",
 +						       SOCK_INODE(sk->sk_socket)->i_ino,
 +						       SOCK_INODE(other->sk_socket)->i_ino);
 +					else
 +						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]:exit read due to peer shutdown  \n",
 +						       SOCK_INODE(sk->sk_socket)->i_ino);
 +				} else {
 +					printk(KERN_INFO " [mtk_net][unix]: recvmsg: exit read due to peer shutdown \n");
 +				}
 +#endif
                                goto unlock;
 -
 +                      }
                        unix_state_unlock(sk);
                        err = -EAGAIN;
                        if (!timeo)
                        mutex_unlock(&u->readlock);
  
                        timeo = unix_stream_data_wait(sk, timeo, last);
 +			if (!timeo) {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +				if (sk && sk->sk_socket) {
 +					if (other && other->sk_socket)
 +						printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to timeout  \n",
 +						       SOCK_INODE(sk->sk_socket)->i_ino,
 +						       SOCK_INODE(other->sk_socket)->i_ino);
 +					else
 +						printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:null]:exit read due to timeout  \n",
 +						       SOCK_INODE(sk->sk_socket)->i_ino);
 +				} else {
 +					printk(KERN_INFO " [mtk_net][unix]: recvmsg:exit read due to timeout \n");
 +				}
 +#endif
 +			}
  
                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
        mutex_unlock(&u->readlock);
        scm_recv(sock, msg, siocb->scm, flags);
  out:
        return copied ? : err;
  }
  
@@@ -2544,17 -2409,12 +2536,17 @@@ static unsigned int unix_dgram_poll(str
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
 +		{
                        return mask;
 -      }
 +		}
 +	}
  
        /* No write status requested, avoid expensive OUT tests. */
        if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
 +	{
                return mask;
 +	}
  
        writable = unix_writable(sk);
        if (writable) {
index 9aaa4e72cc1fc602afada0596a0f69c6c4d08eee,8405a0428b67c150e3c4a47e787f3d1d10d8d0a3..15d172e39cf4435a661a09ed9a2fe29fdff0834f
@@@ -441,7 -441,7 +441,7 @@@ int apparmor_bprm_set_creds(struct linu
                                new_profile = aa_get_profile(ns->unconfined);
                                info = "ux fallback";
                        } else {
-                               error = -ENOENT;
+                               error = -EACCES;
                                info = "profile not found";
                        }
                }
@@@ -629,7 -629,7 +629,7 @@@ int aa_change_hat(const char *hats[], i
         * There is no exception for unconfined as change_hat is not
         * available.
         */
 -      if (current->no_new_privs)
 +      if (task_no_new_privs(current))
                return -EPERM;
  
        /* released below */
@@@ -780,7 -780,7 +780,7 @@@ int aa_change_profile(const char *ns_na
         * no_new_privs is set because this aways results in a reduction
         * of permissions.
         */
 -      if (current->no_new_privs && !unconfined(profile)) {
 +      if (task_no_new_privs(current) && !unconfined(profile)) {
                put_cred(cred);
                return -EPERM;
        }