Merge tag 'v3.10.85' into update
author Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:46:39 +0000 (22:46 +0100)
committer Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:46:39 +0000 (22:46 +0100)
This is the 3.10.85 stable release

15 files changed:
1  2 
Makefile
arch/arm64/mm/init.c
drivers/base/firmware_class.c
drivers/cpuidle/governors/menu.c
drivers/md/md.c
drivers/mmc/card/block.c
drivers/mtd/mtd_blkdevs.c
drivers/usb/core/devio.c
drivers/usb/host/xhci-mem.c
drivers/usb/serial/option.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/fuse/inode.c
kernel/trace/trace.h

diff --combined Makefile
index 7a9eadf1717d995e44560fb4e718c66814dbd889,11a7e7bc31f26a980dcfa903e83fa6f685c17b1c..33d047b338008cf4ead3b02778d68cd5602c827b
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 84
+ SUBLEVEL = 85
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -374,7 -374,7 +374,7 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
                   -fno-delete-null-pointer-checks \
 -                 -std=gnu89
 +                 -w -std=gnu89
  
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
diff --combined arch/arm64/mm/init.c
index 07e93593299797ab69a7dbee2fc96d194f0b4845,5c5516611b5e5fbbd68dd7ed6f080be4b5168a9d..d8dce27db963da26a330a116bf3185c7b53ae1d8
  #include <linux/memblock.h>
  #include <linux/sort.h>
  #include <linux/of_fdt.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/mrdump.h>
  
  #include <asm/prom.h>
  #include <asm/sections.h>
  #include <asm/setup.h>
  #include <asm/sizes.h>
  #include <asm/tlb.h>
 +#include <mach/mtk_memcfg.h>
  
  #include "mm.h"
  
@@@ -70,22 -67,22 +70,22 @@@ static int __init early_initrd(char *p
  }
  early_param("initrd", early_initrd);
  
 -#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
 -
  static void __init zone_sizes_init(unsigned long min, unsigned long max)
  {
        struct memblock_region *reg;
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
 -      unsigned long max_dma32 = min;
 +      unsigned long max_dma = min;
  
        memset(zone_size, 0, sizeof(zone_size));
  
 -#ifdef CONFIG_ZONE_DMA32
        /* 4GB maximum for 32-bit only capable devices */
 -      max_dma32 = max(min, min(max, MAX_DMA32_PFN));
 -      zone_size[ZONE_DMA32] = max_dma32 - min;
 -#endif
 -      zone_size[ZONE_NORMAL] = max - max_dma32;
 +      if (IS_ENABLED(CONFIG_ZONE_DMA)) {
 +              unsigned long max_dma_phys =
 +                      (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
 +              max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
 +              zone_size[ZONE_DMA] = max_dma - min;
 +      }
 +      zone_size[ZONE_NORMAL] = max - max_dma;
  
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
  
  
                if (start >= max)
                        continue;
 -#ifdef CONFIG_ZONE_DMA32
 -              if (start < max_dma32) {
 -                      unsigned long dma_end = min(end, max_dma32);
 -                      zhole_size[ZONE_DMA32] -= dma_end - start;
 +
 +              if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
 +                      unsigned long dma_end = min(end, max_dma);
 +                      zhole_size[ZONE_DMA] -= dma_end - start;
                }
 -#endif
 -              if (end > max_dma32) {
 +
 +              if (end > max_dma) {
                        unsigned long normal_end = min(end, max);
 -                      unsigned long normal_start = max(start, max_dma32);
 +                      unsigned long normal_start = max(start, max_dma);
                        zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
                }
        }
  }
  
  #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 +#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
 +
  int pfn_valid(unsigned long pfn)
  {
 -      return memblock_is_memory(pfn << PAGE_SHIFT);
 +      return (pfn & PFN_MASK) == pfn && memblock_is_memory(pfn << PAGE_SHIFT);
  }
  EXPORT_SYMBOL(pfn_valid);
  #endif
@@@ -136,34 -131,6 +136,34 @@@ static void arm64_memory_present(void
  }
  #endif
  
 +static bool arm64_memblock_steal_permitted = true;
 +
 +phys_addr_t __init arm64_memblock_steal(phys_addr_t size, phys_addr_t align)
 +{
 +      phys_addr_t phys;
 +
 +      BUG_ON(!arm64_memblock_steal_permitted);
 +
 +      phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
 +      memblock_free(phys, size);
 +      memblock_remove(phys, size);
 +      if (phys) {
 +              MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT"[PHY layout]%ps   :   0x%08llx - 0x%08llx (0x%08llx)\n",
 +                      __builtin_return_address(0), (unsigned long long)phys, 
 +              (unsigned long long)phys + size - 1,
 +              (unsigned long long)size);
 +      }
 +
 +      return phys;
 +}
 +
 +#ifdef CONFIG_MTK_COMBO_CHIP
 +extern void mtk_wcn_consys_memory_reserve(void);
 +void __weak mtk_wcn_consys_memory_reserve(void)
 +{
 +    printk(KERN_ERR"weak reserve function: %s", __FUNCTION__);
 +}
 +#endif
  void __init arm64_memblock_init(void)
  {
        u64 *reserve_map, base, size;
                        break;
                memblock_reserve(base, size);
        }
 +#ifdef CONFIG_MTK_ECCCI_DRIVER
 +{
 +  extern void ccci_md_mem_reserve(void);
 +      ccci_md_mem_reserve();
 +}
 +#endif
 + 
 +#ifdef CONFIG_MTK_COMBO_CHIP
 +{
 +      mtk_wcn_consys_memory_reserve();
 +}
 +#endif
 +
 +#if defined(CONFIG_MTK_RAM_CONSOLE_USING_DRAM)
 +      memblock_reserve(CONFIG_MTK_RAM_CONSOLE_DRAM_ADDR, CONFIG_MTK_RAM_CONSOLE_DRAM_SIZE);
 +#endif
 +        mrdump_reserve_memory();
  
 +        mrdump_mini_reserve_memory();
 +      
 +      arm64_memblock_steal_permitted = false;
 +      early_init_fdt_scan_reserved_mem();
        memblock_allow_resize();
        memblock_dump_all();
  }
@@@ -316,7 -262,7 +316,7 @@@ static void __init free_unused_memmap(v
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
-               prev_end = ALIGN(start + __phys_to_pfn(reg->size),
+               prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
                                 MAX_ORDER_NR_PAGES);
        }
  
@@@ -392,7 -338,7 +392,7 @@@ void __init mem_init(void
  #define MLM(b, t) b, t, ((t) - (b)) >> 20
  #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
  
 -      pr_notice("Virtual kernel memory layout:\n"
 +      MTK_MEMCFG_LOG_AND_PRINTK(KERN_NOTICE "Virtual kernel memory layout:\n"
                  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld MB)\n"
  #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld MB)\n"
index 9cf8dc7448e257446a54c0b4d904b37ae4321463,8e08fab0ed2e0f10f920fb338ab50a73397d3e12..30d575a09ad17e62f345a3520f2b480519c800c2
@@@ -27,7 -27,6 +27,7 @@@
  #include <linux/pm.h>
  #include <linux/suspend.h>
  #include <linux/syscore_ops.h>
 +#include <linux/reboot.h>
  
  #include <generated/utsrelease.h>
  
@@@ -131,7 -130,6 +131,7 @@@ struct firmware_buf 
        struct page **pages;
        int nr_pages;
        int page_array_size;
 +      struct list_head pending_list;
  #endif
        char fw_id[];
  };
@@@ -173,9 -171,6 +173,9 @@@ static struct firmware_buf *__allocate_
        strcpy(buf->fw_id, fw_name);
        buf->fwc = fwc;
        init_completion(&buf->completion);
 +#ifdef CONFIG_FW_LOADER_USER_HELPER
 +      INIT_LIST_HEAD(&buf->pending_list);
 +#endif
  
        pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
  
@@@ -451,8 -446,10 +451,8 @@@ static struct firmware_priv *to_firmwar
        return container_of(dev, struct firmware_priv, dev);
  }
  
 -static void fw_load_abort(struct firmware_priv *fw_priv)
 +static void __fw_load_abort(struct firmware_buf *buf)
  {
 -      struct firmware_buf *buf = fw_priv->buf;
 -
        /*
         * There is a small window in which user can write to 'loading'
         * between loading done and disappearance of 'loading'
        if (test_bit(FW_STATUS_DONE, &buf->status))
                return;
  
 +      list_del_init(&buf->pending_list);
        set_bit(FW_STATUS_ABORT, &buf->status);
        complete_all(&buf->completion);
 +}
 +
 +static void fw_load_abort(struct firmware_priv *fw_priv)
 +{
 +      struct firmware_buf *buf = fw_priv->buf;
 +
 +      __fw_load_abort(buf);
  
        /* avoid user action after loading abort */
        fw_priv->buf = NULL;
  #define is_fw_load_aborted(buf)       \
        test_bit(FW_STATUS_ABORT, &(buf)->status)
  
 +static LIST_HEAD(pending_fw_head);
 +
 +/* reboot notifier for avoid deadlock with usermode_lock */
 +static int fw_shutdown_notify(struct notifier_block *unused1,
 +                            unsigned long unused2, void *unused3)
 +{
 +      mutex_lock(&fw_lock);
 +      while (!list_empty(&pending_fw_head))
 +              __fw_load_abort(list_first_entry(&pending_fw_head,
 +                                             struct firmware_buf,
 +                                             pending_list));
 +      mutex_unlock(&fw_lock);
 +      return NOTIFY_DONE;
 +}
 +
 +static struct notifier_block fw_shutdown_nb = {
 +      .notifier_call = fw_shutdown_notify,
 +};
 +
  static ssize_t firmware_timeout_show(struct class *class,
                                     struct class_attribute *attr,
                                     char *buf)
@@@ -543,10 -513,8 +543,8 @@@ static void fw_dev_release(struct devic
        module_put(THIS_MODULE);
  }
  
- static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+ static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
  {
-       struct firmware_priv *fw_priv = to_firmware_priv(dev);
        if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
                return -ENOMEM;
        if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
        return 0;
  }
  
+ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+ {
+       struct firmware_priv *fw_priv = to_firmware_priv(dev);
+       int err = 0;
+       mutex_lock(&fw_lock);
+       if (fw_priv->buf)
+               err = do_firmware_uevent(fw_priv, env);
+       mutex_unlock(&fw_lock);
+       return err;
+ }
  static struct class firmware_class = {
        .name           = "firmware",
        .class_attrs    = firmware_class_attrs,
@@@ -649,7 -629,6 +659,7 @@@ static ssize_t firmware_loading_store(s
                         * is completed.
                         * */
                        fw_map_pages_buf(fw_buf);
 +                      list_del_init(&fw_buf->pending_list);
                        complete_all(&fw_buf->completion);
                        break;
                }
@@@ -884,15 -863,8 +894,15 @@@ static int _request_firmware_load(struc
                goto err_del_dev;
        }
  
 +      mutex_lock(&fw_lock);
 +      list_add(&buf->pending_list, &pending_fw_head);
 +      mutex_unlock(&fw_lock);
 +
        retval = device_create_file(f_dev, &dev_attr_loading);
        if (retval) {
 +              mutex_lock(&fw_lock);
 +              list_del_init(&buf->pending_list);
 +              mutex_unlock(&fw_lock);
                dev_err(f_dev, "%s: device_create_file failed\n", __func__);
                goto err_del_bin_attr;
        }
@@@ -1567,7 -1539,6 +1577,7 @@@ static int __init firmware_class_init(v
  {
        fw_cache_init();
  #ifdef CONFIG_FW_LOADER_USER_HELPER
 +      register_reboot_notifier(&fw_shutdown_nb);
        return class_register(&firmware_class);
  #else
        return 0;
@@@ -1581,7 -1552,6 +1591,7 @@@ static void __exit firmware_class_exit(
        unregister_pm_notifier(&fw_cache.pm_notify);
  #endif
  #ifdef CONFIG_FW_LOADER_USER_HELPER
 +      unregister_reboot_notifier(&fw_shutdown_nb);
        class_unregister(&firmware_class);
  #endif
  }
index 33305fb3d5fcd7c06a1af2d9c862d70c47382e30,67fd901f6fc9cb75766db6ac7b4a344d1a76211e..16e8a59bcf78601f9176ec5bb2cc060f541854fe
@@@ -173,12 -173,7 +173,12 @@@ static inline int performance_multiplie
  
        /* for higher loadavg, we are more reluctant */
  
 -      mult += 2 * get_loadavg();
 +      /*
 +       * this doesn't work as intended - it is almost always 0, but can
 +       * sometimes, depending on workload, spike very high into the hundreds
 +       * even when the average cpu load is under 10%.
 +       */
 +      /* mult += 2 * get_loadavg(); */
  
        /* for IO wait tasks (per cpu!) we add 5x each */
        mult += 10 * nr_iowait_cpu(smp_processor_id());
@@@ -274,7 -269,7 +274,7 @@@ static int menu_select(struct cpuidle_d
                data->needs_update = 0;
        }
  
-       data->last_state_idx = 0;
+       data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
        data->exit_us = 0;
  
        /* Special case when user has set very strict latency requirement */
diff --combined drivers/md/md.c
index 27a28f620494f0327fe15213eb7e32cd28bfc632,631fe3e9c6e558823699477a94c4ab303c2105a4..a07668b2e68bb5b11620d115bbc693b83c4ed6a4
@@@ -33,7 -33,6 +33,7 @@@
  */
  
  #include <linux/kthread.h>
 +#include <linux/freezer.h>
  #include <linux/blkdev.h>
  #include <linux/sysctl.h>
  #include <linux/seq_file.h>
@@@ -6222,7 -6221,7 +6222,7 @@@ static int update_array_info(struct mdd
            mddev->ctime         != info->ctime         ||
            mddev->level         != info->level         ||
  /*        mddev->layout        != info->layout        || */
-           !mddev->persistent   != info->not_persistent||
+           mddev->persistent    != !info->not_persistent ||
            mddev->chunk_sectors != info->chunk_size >> 9 ||
            /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
            ((state^info->state) & 0xfffffe00)
@@@ -7372,14 -7371,11 +7372,14 @@@ void md_do_sync(struct md_thread *threa
         *
         */
  
 +      set_freezable();
 +
        do {
                mddev->curr_resync = 2;
  
        try_again:
 -              if (kthread_should_stop())
 +
 +              if (kthread_freezable_should_stop(NULL))
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                         * time 'round when curr_resync == 2
                                         */
                                        continue;
 +
 +                              try_to_freeze();
 +
                                /* We need to wait 'interruptible' so as not to
                                 * contribute to the load average, and not to
                                 * be caught by 'softlockup'
                                               " share one or more physical units)\n",
                                               desc, mdname(mddev), mdname(mddev2));
                                        mddev_put(mddev2);
 +                                      try_to_freeze();
                                        if (signal_pending(current))
                                                flush_signals(current);
                                        schedule();
                                                 || kthread_should_stop());
                }
  
 -              if (kthread_should_stop())
 +              if (kthread_freezable_should_stop(NULL))
                        goto interrupted;
  
                sectors = mddev->pers->sync_request(mddev, j, &skipped,
                        last_mark = next;
                }
  
 -
 -              if (kthread_should_stop())
 +              if (kthread_freezable_should_stop(NULL))
                        goto interrupted;
  
  
@@@ -7768,10 -7761,8 +7768,10 @@@ no_add
   */
  void md_check_recovery(struct mddev *mddev)
  {
 -      if (mddev->suspended)
 +#ifdef CONFIG_FREEZER
 +      if (mddev->suspended || unlikely(atomic_read(&system_freezing_cnt)))
                return;
 +#endif
  
        if (mddev->bitmap)
                bitmap_daemon_work(mddev);
diff --combined drivers/mmc/card/block.c
index 556ba8bc8b7e0c27286e5794856d86697c9a0fd8,885ba4a19a6ceed2d8dc577e31776c6a59afbfea..6a846b6593fd01b60dac3256d478220038e6c8dd
@@@ -35,9 -35,6 +35,9 @@@
  #include <linux/capability.h>
  #include <linux/compat.h>
  
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/mmc.h>
 +
  #include <linux/mmc/ioctl.h>
  #include <linux/mmc/card.h>
  #include <linux/mmc/host.h>
  #include <asm/uaccess.h>
  
  #include "queue.h"
 +#include <mach/mtk_meminfo.h>
 +
 +//add vmstat info with block tag log
 +#include <linux/vmstat.h>
 +#define FEATURE_STORAGE_VMSTAT_LOGGER
 +
 +
 +#include <linux/xlog.h>
 +#include <asm/div64.h>
 +#include <linux/vmalloc.h>
 +
 +#include <linux/mmc/sd_misc.h>
 +
 +#define MET_USER_EVENT_SUPPORT
 +#include <linux/met_drv.h>
 +
 +#define FEATURE_STORAGE_PERF_INDEX
 +//enable storage log in user load
 +#if 0
 +#ifdef USER_BUILD_KERNEL
 +#undef FEATURE_STORAGE_PERF_INDEX
 +#endif
 +#endif
  
  MODULE_ALIAS("mmc:block");
  #ifdef MODULE_PARAM_PREFIX
@@@ -159,338 -133,6 +159,338 @@@ static inline int mmc_blk_part_switch(s
                                      struct mmc_blk_data *md);
  static int get_card_status(struct mmc_card *card, u32 *status, int retries);
  
 +#ifndef CONFIG_MTK_FPGA
 +#include <linux/met_ftrace_bio.h>
 +#endif
 +
 +char mmc_get_rw_type(u32 opcode)
 +{
 +      switch (opcode)
 +      {
 +        case MMC_READ_SINGLE_BLOCK:
 +        case MMC_READ_MULTIPLE_BLOCK:
 +            return 'R';
 +        case MMC_WRITE_BLOCK:
 +        case MMC_WRITE_MULTIPLE_BLOCK:
 +            return 'W';
 +        default:
 +            // Unknown opcode!!!
 +            return 'X';
 +      }
 +}
 +
 +inline int check_met_mmc_async_req_legal(struct mmc_host *host, struct mmc_async_req *areq)
 +{
 +      int is_legal = 0;
 +
 +      if (!((host == NULL) || (areq == NULL) || (areq->mrq == NULL)
 +              || (areq->mrq->cmd == NULL) || (areq->mrq->data == NULL)
 +              || (host->card == NULL))) {
 +              is_legal = 1;
 +      }
 +
 +      return is_legal;
 +}
 +
 +inline int check_met_mmc_blk_data_legal(struct mmc_blk_data *md)
 +{
 +      int is_legal = 0;
 +
 +      if (!((md == NULL) || (md->disk == NULL))) {
 +              is_legal = 1;
 +      }
 +
 +      return is_legal;
 +}
 +
 +inline int check_met_mmc_req_legal(struct mmc_host *host, struct mmc_request *req)
 +{
 +      int is_legal = 0;
 +
 +      if (!((host == NULL) || (req == NULL) || (req->cmd == NULL)
 +              || (req->data == NULL) || (host->card == NULL))) {
 +              is_legal = 1;
 +      }
 +
 +      return is_legal;
 +}
 +
 +void met_mmc_insert(struct mmc_host *host, struct mmc_async_req *areq)
 +{
 +      struct mmc_blk_data *md;
 +      char type;
 +
 +      if (!check_met_mmc_async_req_legal(host, areq))
 +              return;
 +
 +      md = mmc_get_drvdata(host->card);
 +      if (!check_met_mmc_blk_data_legal(md))
 +              return;
 +
 +      type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +
 +#ifndef CONFIG_MTK_FPGA
 +      MET_FTRACE_PRINTK(met_mmc_insert, md, areq, type);
 +#endif
 +}
 +
 +void met_mmc_dma_map(struct mmc_host *host, struct mmc_async_req *areq)
 +{
 +      struct mmc_blk_data *md;
 +      char type;
 +
 +      if (!check_met_mmc_async_req_legal(host, areq))
 +              return;
 +
 +      md = mmc_get_drvdata(host->card);
 +      if (!check_met_mmc_blk_data_legal(md))
 +              return;
 +
 +      type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +#ifndef CONFIG_MTK_FPGA
 +      MET_FTRACE_PRINTK(met_mmc_dma_map, md, areq, type);
 +#endif
 +}
 +
 +//void met_mmc_issue(struct mmc_host *host, struct mmc_async_req *areq)
 +//{
 +//    struct mmc_blk_data *md;
 +//    char type;
 +//
 +//    if (!check_met_mmc_async_req_legal(host, areq))
 +//            return;
 +//
 +//    md = mmc_get_drvdata(host->card);
 +//
 +//    type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +//    if (type == 'X')
 +//            return;
 +//
 +//    MET_FTRACE_PRINTK(met_mmc_issue, md, areq, type);
 +//}
 +
 +void met_mmc_issue(struct mmc_host *host, struct mmc_request *req)
 +{
 +      struct mmc_blk_data *md;
 +      char type;
 +
 +      if (!check_met_mmc_req_legal(host, req))
 +              return;
 +
 +      md = mmc_get_drvdata(host->card);
 +      if (!check_met_mmc_blk_data_legal(md))
 +              return;
 +
 +      type = mmc_get_rw_type(req->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +#ifndef CONFIG_MTK_FPGA
 +      MET_FTRACE_PRINTK(met_mmc_issue, md, req, type);
 +#endif
 +}
 +
 +void met_mmc_send_cmd(struct mmc_host *host, struct mmc_command *cmd)
 +{
 +      struct mmc_blk_data *md = mmc_get_drvdata(host->card);
 +      char type;
 +
 +      type = mmc_get_rw_type(cmd->opcode);
 +      if (type == 'X')
 +              return;
 +
 +      trace_printk("%d,%d %c %d + %d [%s]\n",
 +                      md->disk->major, md->disk->first_minor, type,
 +                      cmd->arg, cmd->data->blocks,
 +                      current->comm);
 +}
 +
 +void met_mmc_xfr_done(struct mmc_host *host, struct mmc_command *cmd)
 +{
 +    struct mmc_blk_data *md=mmc_get_drvdata(host->card);
 +      char type;
 +
 +      type = mmc_get_rw_type(cmd->opcode);
 +      if (type == 'X')
 +              return;
 +
 +      trace_printk("%d,%d %c %d + %d [%s]\n",
 +                      md->disk->major, md->disk->first_minor, type,
 +                      cmd->arg, cmd->data->blocks,
 +                      current->comm);
 +}
 +
 +void met_mmc_wait_xfr(struct mmc_host *host, struct mmc_async_req *areq)
 +{
 +      struct mmc_blk_data *md = mmc_get_drvdata(host->card);
 +      char type;
 +
 +      type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +
 +      trace_printk("%d,%d %c %d + %d [%s]\n",
 +                      md->disk->major, md->disk->first_minor, type,
 +                      areq->mrq->cmd->arg, areq->mrq->data->blocks,
 +                      current->comm);
 +
 +}
 +
 +void met_mmc_tuning_start(struct mmc_host *host, struct mmc_command *cmd)
 +{
 +      struct mmc_blk_data *md = mmc_get_drvdata(host->card);
 +      char type;
 +
 +      type = mmc_get_rw_type(cmd->opcode);
 +      if (type == 'X')
 +              return;
 +
 +      trace_printk("%d,%d %c %d + %d [%s]\n",
 +                      md->disk->major, md->disk->first_minor, type,
 +                      cmd->arg, cmd->data->blocks,
 +                      current->comm);
 +}
 +
 +void met_mmc_tuning_end(struct mmc_host *host, struct mmc_command *cmd)
 +{
 +      struct mmc_blk_data *md = mmc_get_drvdata(host->card);
 +      char type;
 +
 +      type = mmc_get_rw_type(cmd->opcode);
 +      if (type == 'X')
 +              return;
 +
 +      trace_printk("%d,%d %c %d + %d [%s]\n",
 +                      md->disk->major, md->disk->first_minor, type,
 +                      cmd->arg, cmd->data->blocks,
 +                      current->comm);
 +}
 +
 +void met_mmc_complete(struct mmc_host *host, struct mmc_async_req *areq)
 +{
 +      struct mmc_blk_data *md;
 +      char type;
 +
 +      if (!check_met_mmc_async_req_legal(host, areq))
 +              return;
 +
 +      md = mmc_get_drvdata(host->card);
 +      if (!check_met_mmc_blk_data_legal(md))
 +              return;
 +
 +      type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +#ifndef CONFIG_MTK_FPGA
 +      MET_FTRACE_PRINTK(met_mmc_complete, md, areq, type);
 +#endif
 +}
 +
 +void met_mmc_dma_unmap_start(struct mmc_host *host, struct mmc_async_req *areq)
 +{
 +      struct mmc_blk_data *md;
 +      char type;
 +
 +      if (!check_met_mmc_async_req_legal(host, areq))
 +              return;
 +
 +      md = mmc_get_drvdata(host->card);
 +      if (!check_met_mmc_blk_data_legal(md))
 +              return;
 +
 +      type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +#ifndef CONFIG_MTK_FPGA
 +      MET_FTRACE_PRINTK(met_mmc_dma_unmap_start, md, areq, type);
 +#endif
 +}
 +
 +void met_mmc_dma_unmap_stop(struct mmc_host *host, struct mmc_async_req *areq)
 +{
 +      struct mmc_blk_data *md;
 +      char type;
 +
 +      if (!check_met_mmc_async_req_legal(host, areq))
 +              return;
 +
 +      md = mmc_get_drvdata(host->card);
 +      if (!check_met_mmc_blk_data_legal(md))
 +              return;
 +
 +      type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +#ifndef CONFIG_MTK_FPGA
 +      MET_FTRACE_PRINTK(met_mmc_dma_unmap_stop, md, areq, type);
 +#endif
 +}
 +
 +void met_mmc_continue_req_end(struct mmc_host *host, struct mmc_async_req *areq)
 +{
 +      struct mmc_blk_data *md;
 +      char type;
 +
 +      if (!check_met_mmc_async_req_legal(host, areq))
 +              return;
 +
 +      md = mmc_get_drvdata(host->card);
 +      if (!check_met_mmc_blk_data_legal(md))
 +              return;
 +
 +      type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +#ifndef CONFIG_MTK_FPGA
 +      MET_FTRACE_PRINTK(met_mmc_continue_req_end, md, areq, type);
 +#endif
 +}
 +
 +void met_mmc_dma_stop(struct mmc_host *host, struct mmc_async_req *areq, unsigned int bd_num)
 +{
 +      struct mmc_blk_data *md;
 +      char type;
 +
 +      if (!check_met_mmc_async_req_legal(host, areq))
 +              return;
 +
 +      md = mmc_get_drvdata(host->card);
 +      if (!check_met_mmc_blk_data_legal(md))
 +              return;
 +
 +      type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +      if (type == 'X')
 +              return;
 +#ifndef CONFIG_MTK_FPGA
 +      MET_FTRACE_PRINTK(met_mmc_dma_stop, md, areq, type, bd_num);
 +#endif
 +}
 +
 +//void met_mmc_end(struct mmc_host *host, struct mmc_async_req *areq)
 +//{
 +//    struct mmc_blk_data *md;
 +//    char type;
 +//
 +//    if (areq && areq->mrq && host && host->card) {
 +//            type = mmc_get_rw_type(areq->mrq->cmd->opcode);
 +//            if (type == 'X')
 +//                    return;
 +//
 +//            md = mmc_get_drvdata(host->card);
 +//
 +//            if (areq && areq->mrq)
 +//            {
 +//                    trace_printk("%d,%d %c %d + %d [%s]\n",
 +//                                    md->disk->major, md->disk->first_minor, type,
 +//                                    areq->mrq->cmd->arg, areq->mrq->data->blocks,
 +//                                    current->comm);
 +//            }
 +//    }
 +//}
 +
  static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
  {
        struct mmc_packed *packed = mqrq->packed;
@@@ -521,7 -163,11 +521,7 @@@ static struct mmc_blk_data *mmc_blk_get
  
  static inline int mmc_get_devidx(struct gendisk *disk)
  {
 -      int devmaj = MAJOR(disk_devt(disk));
 -      int devidx = MINOR(disk_devt(disk)) / perdev_minors;
 -
 -      if (!devmaj)
 -              devidx = disk->first_minor / perdev_minors;
 +      int devidx = disk->first_minor / perdev_minors;
        return devidx;
  }
  
@@@ -556,6 -202,8 +556,8 @@@ static ssize_t power_ro_lock_show(struc
  
        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
  
+       mmc_blk_put(md);
        return ret;
  }
  
@@@ -1033,12 -681,6 +1035,12 @@@ static u32 mmc_sd_num_wr_blocks(struct 
        return result;
  }
  
 +u32 __mmc_sd_num_wr_blocks(struct mmc_card *card)
 +{
 +      return mmc_sd_num_wr_blocks(card);
 +}
 +EXPORT_SYMBOL(__mmc_sd_num_wr_blocks);
 +
  static int send_stop(struct mmc_card *card, u32 *status)
  {
        struct mmc_command cmd = {0};
@@@ -1088,22 -730,18 +1090,22 @@@ static int mmc_blk_cmd_error(struct req
                        req->rq_disk->disk_name, "timed out", name, status);
  
                /* If the status cmd initially failed, retry the r/w cmd */
 -              if (!status_valid)
 +              if (!status_valid) {
 +                      pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
                        return ERR_RETRY;
 -
 +              }
                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
 -              if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
 +              if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
 +                      pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
                        return ERR_RETRY;
 +              }
  
                /* Otherwise abort the command */
 +              pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
                return ERR_ABORT;
  
        default:
@@@ -1188,17 -826,6 +1190,17 @@@ static int mmc_blk_cmd_recovery(struct 
        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
                err = send_stop(card, &stop_status);
 +              if (err)
 +              {
 +                      get_card_status(card,&status,0);
 +                      if ((R1_CURRENT_STATE(status) == R1_STATE_TRAN) ||(R1_CURRENT_STATE(status) == R1_STATE_PRG)){
 +                              err=0;
 +                              stop_status=0;
 +                              pr_err("b card status %d \n",status);
 +                      }
 +                      else
 +                      pr_err("g card status %d \n",status);    
 +              }
                if (err)
                        pr_err("%s: error %d sending stop command\n",
                               req->rq_disk->disk_name, err);
@@@ -1406,12 -1033,9 +1408,12 @@@ retry
                        goto out;
        }
  
 -      if (mmc_can_sanitize(card))
 +      if (mmc_can_sanitize(card)) {
 +              trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_SANITIZE_START, 1, 0);
 +              trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
 +      }
  out_retry:
        if (err && !mmc_blk_reset(md, card->host, type))
                goto retry;
@@@ -1737,15 -1361,6 +1739,15 @@@ static void mmc_blk_rw_rq_prep(struct m
                readcmd = MMC_READ_SINGLE_BLOCK;
                writecmd = MMC_WRITE_BLOCK;
        }
 +#ifdef CONFIG_MTK_EMMC_CACHE
 +    /* for non-cacheable system data,
 +     * the implementation of reliable write / force prg write,
 +     * must be applied with mutli write cmd
 +     * */
 +    if (mmc_card_mmc(card) && (card->ext_csd.cache_ctrl & 0x1)){
 +       writecmd = MMC_WRITE_MULTIPLE_BLOCK;
 +    }
 +#endif
        if (rq_data_dir(req) == READ) {
                brq->cmd.opcode = readcmd;
                brq->data.flags |= MMC_DATA_READ;
        brq->data.sg = mqrq->sg;
        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
  
 +      if (brq->data.sg_len > 1024)
 +              pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
 +
        /*
         * Adjust the sg list so it is the same size as the
         * request.
                        }
                }
                brq->data.sg_len = i;
 +              pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
        }
  
        mqrq->mmc_active.mrq = &brq->mrq;
@@@ -2031,7 -1642,6 +2033,7 @@@ static void mmc_blk_packed_hdr_wrq_prep
  
        brq->data.sg = mqrq->sg;
        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 +      pr_err("%s: sglen = %d\n", __func__, brq->data.sg_len);
  
        mqrq->mmc_active.mrq = &brq->mrq;
        mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
@@@ -2139,94 -1749,7 +2141,94 @@@ static void mmc_blk_revert_packed_req(s
  
        mmc_blk_clear_packed(mq_rq);
  }
 +#if defined(FEATURE_STORAGE_PERF_INDEX)
 +#define PRT_TIME_PERIOD       500000000
 +#define UP_LIMITS_4BYTE               4294967295UL    //((4*1024*1024*1024)-1)
 +#define ID_CNT 10
 +pid_t mmcqd[ID_CNT]={0};
 +bool start_async_req[ID_CNT] = {0};
 +unsigned long long start_async_req_time[ID_CNT] = {0};
 +static unsigned long long mmcqd_tag_t1[ID_CNT]={0}, mmccid_tag_t1=0;
 +unsigned long long mmcqd_t_usage_wr[ID_CNT]={0}, mmcqd_t_usage_rd[ID_CNT]={0};
 +unsigned int mmcqd_rq_size_wr[ID_CNT]={0}, mmcqd_rq_size_rd[ID_CNT]={0};
 +static unsigned int mmcqd_wr_offset_tag[ID_CNT]={0}, mmcqd_rd_offset_tag[ID_CNT]={0}, mmcqd_wr_offset[ID_CNT]={0}, mmcqd_rd_offset[ID_CNT]={0};
 +static unsigned int mmcqd_wr_bit[ID_CNT]={0},mmcqd_wr_tract[ID_CNT]={0};
 +static unsigned int mmcqd_rd_bit[ID_CNT]={0},mmcqd_rd_tract[ID_CNT]={0};
 +static unsigned int mmcqd_wr_break[ID_CNT]={0}, mmcqd_rd_break[ID_CNT]={0};
 +unsigned int mmcqd_rq_count[ID_CNT]={0}, mmcqd_wr_rq_count[ID_CNT]={0}, mmcqd_rd_rq_count[ID_CNT]={0};
 +extern u32 g_u32_cid[4];      
 +#ifdef FEATURE_STORAGE_META_LOG
 +int check_perdev_minors = CONFIG_MMC_BLOCK_MINORS;
 +struct metadata_rwlogger metadata_logger[10] = {{{0}}};
 +#endif
  
 +unsigned int mmcqd_work_percent[ID_CNT]={0};
 +unsigned int mmcqd_w_throughput[ID_CNT]={0};
 +unsigned int mmcqd_r_throughput[ID_CNT]={0};
 +unsigned int mmcqd_read_clear[ID_CNT]={0};
 +
 +static void g_var_clear(unsigned int idx)
 +{
 +                              mmcqd_t_usage_wr[idx]=0;
 +                              mmcqd_t_usage_rd[idx]=0;
 +                              mmcqd_rq_size_wr[idx]=0;
 +                              mmcqd_rq_size_rd[idx]=0;
 +                              mmcqd_rq_count[idx]=0;
 +                              mmcqd_wr_offset[idx]=0;
 +                              mmcqd_rd_offset[idx]=0;                         
 +                              mmcqd_wr_break[idx]=0;
 +                              mmcqd_rd_break[idx]=0;                          
 +                              mmcqd_wr_tract[idx]=0; 
 +                              mmcqd_wr_bit[idx]=0; 
 +                              mmcqd_rd_tract[idx]=0; 
 +                              mmcqd_rd_bit[idx]=0;                            
 +                              mmcqd_wr_rq_count[idx]=0;
 +                              mmcqd_rd_rq_count[idx]=0;
 +}
 +
 +unsigned int find_mmcqd_index(void)
 +{
 +      pid_t mmcqd_pid=0;
 +      unsigned int idx=0;
 +      unsigned char i=0;
 +
 +      mmcqd_pid = task_pid_nr(current);
 +
 +      if(mmcqd[0] ==0) {
 +              mmcqd[0] = mmcqd_pid;
 +              start_async_req[0]=0;
 +    }
 +
 +      for(i=0;i<ID_CNT;i++)
 +      {
 +              if(mmcqd_pid == mmcqd[i])
 +              {
 +                      idx=i;
 +                      break;
 +              }
 +              if ((mmcqd[i] == 0) ||( i==ID_CNT-1))
 +              {
 +                      mmcqd[i]=mmcqd_pid;
 +                      start_async_req[i]=0;
 +                      idx=i;
 +                      break;
 +              }
 +      }
 +      return idx;
 +}
 +
 +#endif
 +//#undef FEATURE_STORAGE_PID_LOGGER
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +
 +struct struct_pid_logger g_pid_logger[PID_ID_CNT]={{0,0,{0},{0},{0},{0}}};
 +
 +
 +
 +unsigned char *page_logger = NULL;
 +spinlock_t g_locker;
 +
 +#endif
  static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
  {
        struct mmc_blk_data *md = mq->data;
        struct mmc_async_req *areq;
        const u8 packed_nr = 2;
        u8 reqs = 0;
 +      unsigned long long time1 = 0;
 +#if defined(FEATURE_STORAGE_PERF_INDEX)
 +      pid_t mmcqd_pid=0;
 +      unsigned long long t_period=0, t_usage=0;
 +      unsigned int t_percent=0;
 +      unsigned int perf_meter=0; 
 +      unsigned int rq_byte=0,rq_sector=0,sect_offset=0;
 +      unsigned int diversity=0;
 +      unsigned int idx=0;
 +#ifdef FEATURE_STORAGE_META_LOG
 +      unsigned int mmcmetaindex=0;
 +#endif
 +#endif
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +      unsigned int index=0;
 +#endif
  
        if (!rqc && !mq->mqrq_prev->req)
                return 0;
 +      time1 = sched_clock();
  
        if (rqc)
                reqs = mmc_blk_prep_packed_list(mq, rqc);
 +#if defined(FEATURE_STORAGE_PERF_INDEX)
 +                      mmcqd_pid = task_pid_nr(current);
 +
 +                      idx = find_mmcqd_index();
 +
 +                      mmcqd_read_clear[idx] = 1;
 +                      if(mmccid_tag_t1==0)
 +                              mmccid_tag_t1 = time1;
 +                      t_period = time1 - mmccid_tag_t1;
 +                      if(t_period >= (unsigned long long )((PRT_TIME_PERIOD)*(unsigned long long )10))
 +                      {
 +                              xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC Queue Thread:%d, %d, %d, %d, %d \n", mmcqd[0], mmcqd[1], mmcqd[2], mmcqd[3], mmcqd[4]);  
 +                              xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC CID: %lx %lx %lx %lx \n", g_u32_cid[0], g_u32_cid[1], g_u32_cid[2], g_u32_cid[3]);
 +                              mmccid_tag_t1 = time1;
 +                      }
 +                      if(mmcqd_tag_t1[idx]==0)
 +                              mmcqd_tag_t1[idx] = time1;                      
 +                      t_period = time1 - mmcqd_tag_t1[idx];
 +                      
 +                      if(t_period >= (unsigned long long )PRT_TIME_PERIOD)
 +                      {
 +                              mmcqd_read_clear[idx] = 2;
 +                              mmcqd_work_percent[idx] = 1;
 +                              mmcqd_r_throughput[idx] = 0;
 +                              mmcqd_w_throughput[idx] = 0;
 +                              t_usage = mmcqd_t_usage_wr [idx] + mmcqd_t_usage_rd[idx];
 +                              if(t_period > t_usage*100)
 +                                      xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload < 1%%, duty %lld, period %lld, req_cnt=%d \n", mmcqd[idx], t_usage, t_period, mmcqd_rq_count[idx]);
 +                              else
 +                              {
 +                                      do_div(t_period, 100);  //boundary issue
 +                                      t_percent =((unsigned int)t_usage)/((unsigned int)t_period);                                            
 +                                      mmcqd_work_percent[idx] = t_percent;
 +                                      xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload=%d%%, duty %lld, period %lld00, req_cnt=%d \n", mmcqd[idx], t_percent, t_usage, t_period, mmcqd_rq_count[idx]);  //period %lld00 == period %lld x100
 +                              }
 +                              if(mmcqd_wr_rq_count[idx] >= 2)
 +                              {
 +                                      diversity = mmcqd_wr_offset[idx]/(mmcqd_wr_rq_count[idx]-1);
 +                                      xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_wr_rq_count[idx], mmcqd_wr_break[idx], mmcqd_wr_tract[idx], mmcqd_wr_bit[idx]);
 +                              }
 +                              if(mmcqd_rd_rq_count[idx] >= 2)
 +                              {
 +                                      diversity = mmcqd_rd_offset[idx]/(mmcqd_rd_rq_count[idx]-1);
 +                                      xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_rd_rq_count[idx], mmcqd_rd_break[idx], mmcqd_rd_tract[idx], mmcqd_rd_bit[idx]);
 +                              }
 +                              if(mmcqd_t_usage_wr[idx])
 +                              {
 +                                      do_div(mmcqd_t_usage_wr[idx], 1000000); //boundary issue
 +                                      if(mmcqd_t_usage_wr[idx])       // discard print if duration will <1ms
 +                                      {
 +                                              perf_meter = (mmcqd_rq_size_wr[idx])/((unsigned int)mmcqd_t_usage_wr[idx]); //kb/s
 +                                              mmcqd_w_throughput[idx] = perf_meter;
 +                                              xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_wr[idx], mmcqd_t_usage_wr[idx]);
 +                                      }
 +                              }
 +                              if(mmcqd_t_usage_rd[idx])
 +                              {
 +                                      do_div(mmcqd_t_usage_rd[idx], 1000000); //boundary issue
 +                                      if(mmcqd_t_usage_rd[idx])       // discard print if duration will <1ms
 +                                      {
 +                                              perf_meter = (mmcqd_rq_size_rd[idx])/((unsigned int)mmcqd_t_usage_rd[idx]); //kb/s                                      
 +                                              mmcqd_r_throughput[idx] = perf_meter;
 +                                              xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_rd[idx], mmcqd_t_usage_rd[idx]);                                          
 +                                      }
 +                              }
 +                              mmcqd_tag_t1[idx]=time1;
 +                              g_var_clear(idx);
 +#ifdef FEATURE_STORAGE_META_LOG                       
 +                              mmcmetaindex = mmc_get_devidx(md->disk);
 +                              xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd metarw WR:%d NWR:%d HR:%d WDR:%d HDR:%d WW:%d NWW:%d HW:%d\n", 
 +                                      metadata_logger[mmcmetaindex].metadata_rw_logger[0], metadata_logger[mmcmetaindex].metadata_rw_logger[1], 
 +                                      metadata_logger[mmcmetaindex].metadata_rw_logger[2], metadata_logger[mmcmetaindex].metadata_rw_logger[3], 
 +                                      metadata_logger[mmcmetaindex].metadata_rw_logger[4], metadata_logger[mmcmetaindex].metadata_rw_logger[5], 
 +                                      metadata_logger[mmcmetaindex].metadata_rw_logger[6], metadata_logger[mmcmetaindex].metadata_rw_logger[7]);                                      
 +                              clear_metadata_rw_status(md->disk->first_minor);
 +#endif
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +                              do {
 +                                      int i;
 +                                      for(index=0; index<PID_ID_CNT; index++) {
 +                                              
 +                                              if( g_pid_logger[index].current_pid!=0 && g_pid_logger[index].current_pid == mmcqd_pid)
 +                                                      break;
 +                                      }
 +                                      if( index == PID_ID_CNT )
 +                                              break;
 +                                      for( i=0; i<PID_LOGGER_COUNT; i++) {
 +                                              //printk(KERN_INFO"hank mmcqd %d %d", g_pid_logger[index].pid_logger[i], mmcqd_pid);
 +                                              if( g_pid_logger[index].pid_logger[i] == 0)
 +                                                      break;
 +                                              sprintf (g_pid_logger[index].pid_buffer+i*37, "{%05d:%05d:%08d:%05d:%08d}", g_pid_logger[index].pid_logger[i], g_pid_logger[index].pid_logger_counter[i], g_pid_logger[index].pid_logger_length[i], g_pid_logger[index].pid_logger_r_counter[i], g_pid_logger[index].pid_logger_r_length[i]);                                   
 +
 +                                      }
 +                                      if( i != 0) {
 +                                              xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd pid:%d %s\n", g_pid_logger[index].current_pid, g_pid_logger[index].pid_buffer);
 +                                              //xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "sizeof(&(g_pid_logger[index].pid_logger)):%d\n", sizeof(unsigned short)*PID_LOGGER_COUNT);
 +                                              //memset( &(g_pid_logger[index].pid_logger), 0, sizeof(struct struct_pid_logger)-(unsigned long)&(((struct struct_pid_logger *)0)->pid_logger));
 +                                              memset( &(g_pid_logger[index].pid_logger), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
 +                                              memset( &(g_pid_logger[index].pid_logger_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
 +                                              memset( &(g_pid_logger[index].pid_logger_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
 +                                              memset( &(g_pid_logger[index].pid_logger_r_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
 +                                              memset( &(g_pid_logger[index].pid_logger_r_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
 +                                              memset( &(g_pid_logger[index].pid_buffer), 0, sizeof(char)*1024);
 +                                              
 +
 +                                      }
 +                                      g_pid_logger[index].pid_buffer[0] = '\0';
 +                                      
 +                              } while(0);
 +#endif
 +                              
 +#if defined(FEATURE_STORAGE_VMSTAT_LOGGER)
 +                xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "vmstat (FP:%ld)(FD:%ld)(ND:%ld)(WB:%ld)(NW:%ld)\n",
 +                            ((global_page_state(NR_FILE_PAGES)) << (PAGE_SHIFT - 10)),
 +                            ((global_page_state(NR_FILE_DIRTY)) << (PAGE_SHIFT - 10)),
 +                            ((global_page_state(NR_DIRTIED))    << (PAGE_SHIFT - 10)),
 +                            ((global_page_state(NR_WRITEBACK))  << (PAGE_SHIFT - 10)),
 +                            ((global_page_state(NR_WRITTEN))    << (PAGE_SHIFT - 10)));
 +#endif
  
 +                      }
 +              if( rqc )
 +              {
 +                      rq_byte = blk_rq_bytes(rqc);
 +                      rq_sector = blk_rq_sectors(rqc);                        
 +                      if(rq_data_dir(rqc) == WRITE)
 +                      {
 +                              if(mmcqd_wr_offset_tag[idx]>0)
 +                              {
 +                                      sect_offset = abs(blk_rq_pos(rqc) - mmcqd_wr_offset_tag[idx]);  
 +                                      mmcqd_wr_offset[idx] += sect_offset;
 +                                      if(sect_offset == 1)
 +                                              mmcqd_wr_break[idx]++;  
 +                              }
 +                              mmcqd_wr_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;                         
 +                              if(rq_sector <= 1)      //512 bytes
 +                                      mmcqd_wr_bit[idx] ++;
 +                              else if(rq_sector >= 1016)                                      //508kB
 +                                      mmcqd_wr_tract[idx] ++;
 +                      }
 +                      else    //read
 +                      {
 +                              if(mmcqd_rd_offset_tag[idx]>0)
 +                              {
 +                                      sect_offset = abs(blk_rq_pos(rqc) - mmcqd_rd_offset_tag[idx]);  
 +                                      mmcqd_rd_offset[idx] += sect_offset;
 +                                      if(sect_offset == 1)
 +                                              mmcqd_rd_break[idx]++;          
 +                              }
 +                              mmcqd_rd_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;                         
 +                              if(rq_sector <= 1)      //512 bytes
 +                                      mmcqd_rd_bit[idx] ++;
 +                              else if(rq_sector >= 1016)                                      //508kB
 +                                      mmcqd_rd_tract[idx] ++;
 +                      }
 +              }
 +#endif
        do {
                if (rqc) {
                        /*
                                                brq->data.bytes_xfered);
                        }
  
 +//                    if (card && card->host && card->host->areq)
 +//                            met_mmc_end(card->host, card->host->areq);
 +
                        /*
                         * If the blk_end_request function returns non-zero even
                         * though all data has been transferred and no errors
                        break;
                case MMC_BLK_CMD_ERR:
                        ret = mmc_blk_cmd_err(md, card, brq, req, ret);
-                       if (!mmc_blk_reset(md, card->host, type))
-                               break;
-                       goto cmd_abort;
+                       if (mmc_blk_reset(md, card->host, type))
+                               goto cmd_abort;
+                       if (!ret)
+                               goto start_new_req;
+                       break;
                case MMC_BLK_RETRY:
                        if (retry++ < 5)
                                break;
@@@ -2600,11 -1949,6 +2604,11 @@@ static int mmc_blk_issue_rq(struct mmc_
        unsigned long flags;
        unsigned int cmd_flags = req ? req->cmd_flags : 0;
  
 +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 +      if (mmc_bus_needs_resume(card->host))
 +              mmc_resume_bus(card->host);
 +#endif
 +
        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
                mmc_claim_host(card->host);
@@@ -2661,12 -2005,6 +2665,12 @@@ static inline int mmc_blk_readonly(stru
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
  }
  
 +//#if defined(FEATURE_STORAGE_PID_LOGGER)
 +//extern unsigned long get_memory_size(void);
 +//#endif
 +#ifdef CONFIG_MTK_EXTMEM
 +extern void* extmem_malloc_page_align(size_t bytes);
 +#endif
  static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                              struct device *parent,
                                              sector_t size,
        ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
        if (ret)
                goto err_putdisk;
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +      if( !page_logger){
 +              //num_page_logger = sizeof(struct page_pid_logger);
 +              //page_logger = vmalloc(num_physpages*sizeof(struct page_pid_logger));
 +                // solution: use get_memory_size to obtain the size from start pfn to max pfn
 +
 +                //unsigned long count = get_memory_size() >> PAGE_SHIFT;
 +                unsigned long count = get_max_DRAM_size() >> PAGE_SHIFT;
 +#ifdef CONFIG_MTK_EXTMEM
 +              page_logger = extmem_malloc_page_align(count * sizeof(struct page_pid_logger));
 +#else
 +              page_logger = vmalloc(count * sizeof(struct page_pid_logger));
 +#endif
 +              if( page_logger) {
 +                      memset( page_logger, -1, count*sizeof( struct page_pid_logger));
 +              }
 +              spin_lock_init(&g_locker);
 +      }
 +#endif
 +#if defined(FEATURE_STORAGE_META_LOG)
 +      check_perdev_minors = perdev_minors;
 +#endif
  
        md->queue.issue_fn = mmc_blk_issue_rq;
        md->queue.data = md;
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
 +      md->disk->flags = GENHD_FL_EXT_DEVT;
        if (area_type & MMC_BLK_DATA_AREA_RPMB)
                md->disk->flags |= GENHD_FL_NO_PART_SCAN;
  
  static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
  {
        sector_t size;
 +#ifdef CONFIG_MTK_EMMC_SUPPORT
 +    unsigned int l_reserve;
 +      struct storage_info s_info = {0};
 +#endif
        struct mmc_blk_data *md;
  
        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                size = card->csd.capacity << (card->csd.read_blkbits - 9);
        }
  
 +      if(!mmc_card_sd(card)){
 +#ifdef CONFIG_MTK_EMMC_SUPPORT
 +            msdc_get_info(EMMC_CARD_BOOT, EMMC_RESERVE, &s_info);
 +            l_reserve =  s_info.emmc_reserve;
 +            printk("l_reserve = 0x%x\n", l_reserve);
 +            size -= l_reserve;                         /*reserved for 64MB (emmc otp + emmc combo offset + reserved)*/
 +#endif
 +    }
        md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
                                        MMC_BLK_DATA_AREA_MAIN);
        return md;
@@@ -2994,9 -2297,6 +2998,9 @@@ force_ro_fail
  #define CID_MANFID_TOSHIBA    0x11
  #define CID_MANFID_MICRON     0x13
  #define CID_MANFID_SAMSUNG    0x15
 +#define CID_MANFID_SANDISK_NEW        0x45
 +#define CID_MANFID_HYNIX      0x90
 +#define CID_MANFID_KSI                0x70
  
  static const struct mmc_fixup blk_fixups[] =
  {
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
 -
 +      MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_NEW, CID_OEMID_ANY, add_quirk,
 +                MMC_QUIRK_PON),
        /*
         * Some MMC cards experience performance degradation with CMD23
         * instead of CMD12-bounded multiblock transfers. For now we'll
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
        MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
 +#ifdef CONFIG_MTK_EMMC_CACHE
 +    /*
 +     * Some MMC cards' cache feature cannot flush the previous cache data by force programming or reliable write,
 +     * which cannot guarantee the strong ordering between meta data and file data.
 +     */
 +     
 +     /*
 +     * After enabling the cache feature, Toshiba eMMC write performance drops because the flush operation wastes much time.
 +     */
 +      MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
 +                MMC_QUIRK_DISABLE_CACHE),
 +#endif
 +
 +      /* Hynix 4.41 trim will lead to boot failure. */
 +      MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
 +                MMC_QUIRK_TRIM_UNSTABLE),
 +
 +      /* KSI PRV=0x3 trim will lead to a write performance drop. */
 +      MMC_FIXUP(CID_NAME_ANY, CID_MANFID_KSI, CID_OEMID_ANY, add_quirk_mmc_ksi_v03_skip_trim,
 +                MMC_QUIRK_KSI_V03_SKIP_TRIM),
  
        END_FIXUP
  };
  
 +#if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
 +      extern void emmc_create_sys_symlink (struct mmc_card *card);
 +#endif
  static int mmc_blk_probe(struct mmc_card *card)
  {
        struct mmc_blk_data *md, *part_md;
        mmc_set_drvdata(card, md);
        mmc_fixup_device(card, blk_fixups);
  
 +      printk("[%s]: %s by manufacturer settings, quirks=0x%x\n", __func__, md->disk->disk_name, card->quirks);
 +
 +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 +      mmc_set_bus_resume_policy(card->host, 1);
 +#endif
        if (mmc_add_disk(md))
                goto out;
  
                if (mmc_add_disk(part_md))
                        goto out;
        }
 +#if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
 +      emmc_create_sys_symlink(card);
 +#endif
        return 0;
  
   out:
@@@ -3142,9 -2410,6 +3146,9 @@@ static void mmc_blk_remove(struct mmc_c
        mmc_release_host(card->host);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
 +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 +      mmc_set_bus_resume_policy(card->host, 0);
 +#endif
  }
  
  #ifdef CONFIG_PM
index a9c1bf1fe1989d605e18e47cf155a61fa0b818da,32d5e40c6863dcde95571fe7ac5ce3f5075e68c9..2844f2f6f815b54f4986d34c1f911e1c09ec0f46
@@@ -199,6 -199,7 +199,7 @@@ static int blktrans_open(struct block_d
                return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
  
        mutex_lock(&dev->lock);
+       mutex_lock(&mtd_table_mutex);
  
        if (dev->open)
                goto unlock;
  
  unlock:
        dev->open++;
+       mutex_unlock(&mtd_table_mutex);
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
@@@ -232,6 -234,7 +234,7 @@@ error_release
  error_put:
        module_put(dev->tr->owner);
        kref_put(&dev->ref, blktrans_dev_release);
+       mutex_unlock(&mtd_table_mutex);
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
@@@ -245,6 -248,7 +248,7 @@@ static void blktrans_release(struct gen
                return;
  
        mutex_lock(&dev->lock);
+       mutex_lock(&mtd_table_mutex);
  
        if (--dev->open)
                goto unlock;
                __put_mtd_device(dev->mtd);
        }
  unlock:
+       mutex_unlock(&mtd_table_mutex);
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
  }
@@@ -405,9 -410,7 +410,9 @@@ int add_mtd_blktrans_dev(struct mtd_blk
        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
 -
 +#ifdef CONFIG_MTK_MTD_NAND
 +              new->rq->backing_dev_info.ra_pages =0;
 +#endif
        if (!new->rq)
                goto error3;
  
diff --combined drivers/usb/core/devio.c
index 6b914e2f6ac4f3ed51b6639d9f9a36194adb3ac7,62e532fb82ad4a4230094becb2fa676a1a9b9d78..5e3f8c7e59269295cfe21aa83f889eb7284302d3
@@@ -513,7 -513,7 +513,7 @@@ static void async_completed(struct urb 
        snoop(&urb->dev->dev, "urb complete\n");
        snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
                        as->status, COMPLETE, NULL, 0);
-       if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN)
+       if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)
                snoop_urb_data(urb, urb->actual_length);
  
        if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
@@@ -841,10 -841,6 +841,10 @@@ static int usbdev_open(struct inode *in
        usb_unlock_device(dev);
        snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
                        current->comm);
 +
 +      MYDBG("opened by process %d: %s\n", task_pid_nr(current),
 +                      current->comm); 
 +
        return ret;
  
   out_unlock_device:
@@@ -1110,11 -1106,10 +1110,11 @@@ static int proc_getdriver(struct dev_st
  
  static int proc_connectinfo(struct dev_state *ps, void __user *arg)
  {
 -      struct usbdevfs_connectinfo ci = {
 -              .devnum = ps->dev->devnum,
 -              .slow = ps->dev->speed == USB_SPEED_LOW
 -      };
 +      struct usbdevfs_connectinfo ci;
 +
 +      memset(&ci, 0, sizeof(ci));
 +      ci.devnum = ps->dev->devnum;
 +      ci.slow = ps->dev->speed == USB_SPEED_LOW;
  
        if (copy_to_user(arg, &ci, sizeof(ci)))
                return -EFAULT;
@@@ -1598,7 -1593,7 +1598,7 @@@ static struct async *reap_as(struct dev
        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
                as = async_getcompleted(ps);
-               if (as)
+               if (as || !connected(ps))
                        break;
                if (signal_pending(current))
                        break;
@@@ -1621,7 -1616,7 +1621,7 @@@ static int proc_reapurb(struct dev_stat
        }
        if (signal_pending(current))
                return -EINTR;
-       return -EIO;
+       return -ENODEV;
  }
  
  static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
        struct async *as;
  
        as = async_getcompleted(ps);
-       retval = -EAGAIN;
        if (as) {
                retval = processcompl(as, (void __user * __user *)arg);
                free_async(as);
+       } else {
+               retval = (connected(ps) ? -EAGAIN : -ENODEV);
        }
        return retval;
  }
@@@ -1763,7 -1759,7 +1764,7 @@@ static int proc_reapurb_compat(struct d
        }
        if (signal_pending(current))
                return -EINTR;
-       return -EIO;
+       return -ENODEV;
  }
  
  static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
        int retval;
        struct async *as;
  
-       retval = -EAGAIN;
        as = async_getcompleted(ps);
        if (as) {
                retval = processcompl_compat(as, (void __user * __user *)arg);
                free_async(as);
+       } else {
+               retval = (connected(ps) ? -EAGAIN : -ENODEV);
        }
        return retval;
  }
@@@ -1946,7 -1943,8 +1948,8 @@@ static int proc_get_capabilities(struc
  {
        __u32 caps;
  
-       caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM;
+       caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
+                       USBDEVFS_CAP_REAP_AFTER_DISCONNECT;
        if (!ps->dev->bus->no_stop_on_short)
                caps |= USBDEVFS_CAP_BULK_CONTINUATION;
        if (ps->dev->bus->sg_tablesize)
@@@ -2007,6 -2005,32 +2010,32 @@@ static long usbdev_do_ioctl(struct fil
                return -EPERM;
  
        usb_lock_device(dev);
+       /* Reap operations are allowed even after disconnection */
+       switch (cmd) {
+       case USBDEVFS_REAPURB:
+               snoop(&dev->dev, "%s: REAPURB\n", __func__);
+               ret = proc_reapurb(ps, p);
+               goto done;
+       case USBDEVFS_REAPURBNDELAY:
+               snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
+               ret = proc_reapurbnonblock(ps, p);
+               goto done;
+ #ifdef CONFIG_COMPAT
+       case USBDEVFS_REAPURB32:
+               snoop(&dev->dev, "%s: REAPURB32\n", __func__);
+               ret = proc_reapurb_compat(ps, p);
+               goto done;
+       case USBDEVFS_REAPURBNDELAY32:
+               snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
+               ret = proc_reapurbnonblock_compat(ps, p);
+               goto done;
+ #endif
+       }
        if (!connected(ps)) {
                usb_unlock_device(dev);
                return -ENODEV;
                        inode->i_mtime = CURRENT_TIME;
                break;
  
-       case USBDEVFS_REAPURB32:
-               snoop(&dev->dev, "%s: REAPURB32\n", __func__);
-               ret = proc_reapurb_compat(ps, p);
-               break;
-       case USBDEVFS_REAPURBNDELAY32:
-               snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
-               ret = proc_reapurbnonblock_compat(ps, p);
-               break;
        case USBDEVFS_IOCTL32:
                snoop(&dev->dev, "%s: IOCTL32\n", __func__);
                ret = proc_ioctl_compat(ps, ptr_to_compat(p));
                ret = proc_unlinkurb(ps, p);
                break;
  
-       case USBDEVFS_REAPURB:
-               snoop(&dev->dev, "%s: REAPURB\n", __func__);
-               ret = proc_reapurb(ps, p);
-               break;
-       case USBDEVFS_REAPURBNDELAY:
-               snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
-               ret = proc_reapurbnonblock(ps, p);
-               break;
        case USBDEVFS_DISCSIGNAL:
                snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__);
                ret = proc_disconnectsignal(ps, p);
                ret = proc_disconnect_claim(ps, p);
                break;
        }
+  done:
        usb_unlock_device(dev);
        if (ret >= 0)
                inode->i_atime = CURRENT_TIME;
index df94df151316a6b7c92e86c20d02c1db19f90b77,31bed5f7d0eb0819a83e40c8eb2cec0beb0c1b2f..00b3b44d328f6f693733aef4d5600e914b4048b4
@@@ -26,8 -26,6 +26,8 @@@
  #include <linux/dmapool.h>
  
  #include "xhci.h"
 +#include <mach/mt_boot.h>
 +#include <linux/dma-mapping.h>
  
  /*
   * Allocates a generic ring segment from the ring pool, sets the dma address,
@@@ -113,12 -111,10 +113,12 @@@ static void xhci_link_segments(struct x
                val |= TRB_TYPE(TRB_LINK);
                /* Always set the chain bit with 0.95 hardware */
                /* Set chain bit for isoc rings on AMD 0.96 host */
 +#ifndef CONFIG_MTK_XHCI
                if (xhci_link_trb_quirk(xhci) ||
                                (type == TYPE_ISOC &&
                                 (xhci->quirks & XHCI_AMD_0x96_HOST)))
                        val |= TRB_CHAIN;
 +#endif
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
        }
  }
@@@ -427,16 -423,12 +427,16 @@@ static void xhci_free_stream_ctx(struc
                unsigned int num_stream_ctxs,
                struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
  {
 -      struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 +      struct device *dev = xhci_to_hcd(xhci)->self.controller;
  
        if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
 -              dma_free_coherent(&pdev->dev,
 -                              sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 -                              stream_ctx, dma);
 +              dma_free_coherent(dev,
 +                      sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 +#ifdef CONFIG_MTK_XHCI
 +                      xhci->erst.entries, xhci->erst.erst_dma_addr);
 +#else
 +                      stream_ctx, dma);
 +#endif
        else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
                return dma_pool_free(xhci->small_streams_pool,
                                stream_ctx, dma);
@@@ -459,12 -451,12 +459,12 @@@ static struct xhci_stream_ctx *xhci_all
                unsigned int num_stream_ctxs, dma_addr_t *dma,
                gfp_t mem_flags)
  {
 -      struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 +      struct device *dev = xhci_to_hcd(xhci)->self.controller;
  
        if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
 -              return dma_alloc_coherent(&pdev->dev,
 -                              sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 -                              dma, mem_flags);
 +              return dma_alloc_coherent(dev,
 +                      sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 +                      dma, mem_flags);
        else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
                return dma_pool_alloc(xhci->small_streams_pool,
                                mem_flags, dma);
@@@ -1408,10 -1400,10 +1408,10 @@@ int xhci_endpoint_init(struct xhci_hcd 
                /* Attempt to use the ring cache */
                if (virt_dev->num_rings_cached == 0)
                        return -ENOMEM;
+               virt_dev->num_rings_cached--;
                virt_dev->eps[ep_index].new_ring =
                        virt_dev->ring_cache[virt_dev->num_rings_cached];
                virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-               virt_dev->num_rings_cached--;
                xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
                                        1, type);
        }
                break;
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
 +      {
 +              CHIP_SW_VER sw_code = mt_get_chip_sw_ver();
 +              unsigned int hw_code = mt_get_chip_hw_code();
 +                      
 +              if((hw_code == 0x6595) && (sw_code <= CHIP_SW_VER_01)){
 +                      /* workaround for maxp size issue of RXXE */
 +                      if((max_packet % 4 == 2) && (max_packet % 16 != 14) &&
 +                              (max_burst == 0) && usb_endpoint_dir_in(&ep->desc))
 +                              max_packet += 2;
 +              }
                break;
 +      }
        default:
                BUG();
        }
@@@ -1705,7 -1686,7 +1705,7 @@@ static void scratchpad_free(struct xhci
  {
        int num_sp;
        int i;
 -      struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 +      struct device *dev = xhci_to_hcd(xhci)->self.controller;
  
        if (!xhci->scratchpad)
                return;
        num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
  
        for (i = 0; i < num_sp; i++) {
 -              dma_free_coherent(&pdev->dev, xhci->page_size,
 +              dma_free_coherent(dev, xhci->page_size,
                                    xhci->scratchpad->sp_buffers[i],
                                    xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);
        kfree(xhci->scratchpad->sp_buffers);
 -      dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
 +      dma_free_coherent(dev, num_sp * sizeof(u64),
                            xhci->scratchpad->sp_array,
                            xhci->scratchpad->sp_dma);
        kfree(xhci->scratchpad);
@@@ -1781,7 -1762,7 +1781,7 @@@ void xhci_free_command(struct xhci_hcd 
  
  void xhci_mem_cleanup(struct xhci_hcd *xhci)
  {
 -      struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 +      struct device *dev = xhci_to_hcd(xhci)->self.controller;
        struct dev_info *dev_info, *next;
        struct xhci_cd  *cur_cd, *next_cd;
        unsigned long   flags;
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
 -              dma_free_coherent(&pdev->dev, size,
 +              dma_free_coherent(dev, size,
                                xhci->erst.entries, xhci->erst.erst_dma_addr);
        xhci->erst.entries = NULL;
        xhci_dbg(xhci, "Freed ERST\n");
        xhci_dbg(xhci, "Freed medium stream array pool\n");
  
        if (xhci->dcbaa)
 -              dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
 +              dma_free_coherent(dev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
        xhci->dcbaa = NULL;
  
index c065f3e05648d692c6c3edc0d4a394b8e9588fc0,096438e4fb0cf59387efb9f33228e58777e5b37f..510c7f3da637ca886d790b72b174dfa90fe3e017
@@@ -86,13 -86,6 +86,13 @@@ static void option_instat_callback(stru
  #define HUAWEI_PRODUCT_K3765                  0x1465
  #define HUAWEI_PRODUCT_K4605                  0x14C6
  #define HUAWEI_PRODUCT_E173S6                 0x1C07
 +#define HW_USB_DEVICE_AND_INTERFACE_INFO(vend, cl, sc, pr) \
 +      .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
 +              | USB_DEVICE_ID_MATCH_VENDOR, \
 +      .idVendor = (vend), \
 +      .bInterfaceClass = (cl), \
 +      .bInterfaceSubClass = (sc), \
 +      .bInterfaceProtocol = (pr)
  
  #define QUANTA_VENDOR_ID                      0x0408
  #define QUANTA_PRODUCT_Q101                   0xEA02
  #define CELOT_VENDOR_ID                               0x211f
  #define CELOT_PRODUCT_CT680M                  0x6801
  
 -/* Samsung products */
 +/* SS products */
  #define SAMSUNG_VENDOR_ID                       0x04e8
  #define SAMSUNG_PRODUCT_GT_B3730                0x6889
  
  #define MEDIATEK_PRODUCT_DC_1COM              0x00a0
  #define MEDIATEK_PRODUCT_DC_4COM              0x00a5
  #define MEDIATEK_PRODUCT_DC_4COM2             0x00a7
 +#define MEDIATEK_PRODUCT_DC_4COM3             0x00a8
  #define MEDIATEK_PRODUCT_DC_5COM              0x00a4
  #define MEDIATEK_PRODUCT_7208_1COM            0x7101
  #define MEDIATEK_PRODUCT_7208_2COM            0x7102
@@@ -1677,7 -1669,7 +1677,7 @@@ static const struct usb_device_id optio
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
                .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
 -      { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
 +      { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* SS GT-B3730 LTE USB modem.*/
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
 +      { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM3, 0xff, 0x00, 0x00) },
        { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
        { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
        { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
        { } /* Terminating entry */
@@@ -1860,7 -1852,7 +1861,7 @@@ static int option_probe(struct usb_seri
                (const struct option_blacklist_info *) id->driver_info))
                return -ENODEV;
        /*
 -       * Don't bind network interface on Samsung GT-B3730, it is handled by
 +       * Don't bind network interface on SS GT-B3730, it is handled by
         * a separate module.
         */
        if (dev_desc->idVendor == cpu_to_le16(SAMSUNG_VENDOR_ID) &&
diff --combined fs/ext4/inode.c
index 0a4c7eed8699f46300b7fc551ff256b7202d7ae1,10b71e4029a0e0908233eb3d9642c5ce1737efb6..5ae486f4d742050e66f2632e362a16477c3b3c42
@@@ -46,7 -46,6 +46,7 @@@
  #include "truncate.h"
  
  #include <trace/events/ext4.h>
 +#include <linux/blkdev.h>
  
  #define MPAGE_DA_EXTENT_TAIL 0x01
  
@@@ -985,13 -984,6 +985,13 @@@ static int ext4_write_begin(struct fil
        struct page *page;
        pgoff_t index;
        unsigned from, to;
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +              extern unsigned char *page_logger;
 +              struct page_pid_logger *tmp_logger;
 +              unsigned long page_index;
 +              extern spinlock_t g_locker;
 +              unsigned long g_flags;
 +#endif
  
        trace_ext4_write_begin(inode, pos, len, flags);
        /*
@@@ -1087,24 -1079,6 +1087,24 @@@ retry_journal
                return ret;
        }
        *pagep = page;
 +#if defined(FEATURE_STORAGE_PID_LOGGER)
 +              if( page_logger && (*pagep)) {
 +                      //#if defined(CONFIG_FLATMEM)
 +                      //page_index = (unsigned long)((*pagep) - mem_map) ;
 +                      //#else
 +                      page_index = (unsigned long)(__page_to_pfn(*pagep))- PHYS_PFN_OFFSET;
 +                      //#endif
 +                      tmp_logger =((struct page_pid_logger *)page_logger) + page_index;
 +                      spin_lock_irqsave(&g_locker, g_flags);
 +                      if( page_index < num_physpages) {
 +                              if( tmp_logger->pid1 == 0XFFFF)
 +                                      tmp_logger->pid1 = current->pid;
 +                              else if( tmp_logger->pid1 != current->pid)
 +                                      tmp_logger->pid2 = current->pid;
 +                      }
 +                      spin_unlock_irqrestore(&g_locker, g_flags);
 +              }
 +#endif
        return ret;
  }
  
@@@ -1438,7 -1412,7 +1438,7 @@@ static void ext4_da_release_space(struc
  static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned long offset)
  {
-       int to_release = 0;
+       int to_release = 0, contiguous_blks = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;
        struct inode *inode = page->mapping->host;
  
                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
+                       contiguous_blks++;
                        clear_buffer_delay(bh);
+               } else if (contiguous_blks) {
+                       lblk = page->index <<
+                              (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                       lblk += (curr_off >> inode->i_blkbits) -
+                               contiguous_blks;
+                       ext4_es_remove_extent(inode, lblk, contiguous_blks);
+                       contiguous_blks = 0;
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);
  
-       if (to_release) {
+       if (contiguous_blks) {
                lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-               ext4_es_remove_extent(inode, lblk, to_release);
+               lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+               ext4_es_remove_extent(inode, lblk, contiguous_blks);
        }
  
        /* If we have released all the blocks belonging to a cluster, then we
@@@ -2125,19 -2108,32 +2134,32 @@@ static int __ext4_journalled_writepage(
                ext4_walk_page_buffers(handle, page_bufs, 0, len,
                                       NULL, bget_one);
        }
-       /* As soon as we unlock the page, it can go away, but we have
-        * references to buffers so we are safe */
+       /*
+        * We need to release the page lock before we start the
+        * journal, so grab a reference so the page won't disappear
+        * out from under us.
+        */
+       get_page(page);
        unlock_page(page);
  
        handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
                                    ext4_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
-               goto out;
+               put_page(page);
+               goto out_no_pagelock;
        }
        BUG_ON(!ext4_handle_valid(handle));
  
+       lock_page(page);
+       put_page(page);
+       if (page->mapping != mapping) {
+               /* The page got truncated from under us */
+               ext4_journal_stop(handle);
+               ret = 0;
+               goto out;
+       }
        if (inline_data) {
                ret = ext4_journal_get_write_access(handle, inode_bh);
  
                                       NULL, bput_one);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
  out:
+       unlock_page(page);
+ out_no_pagelock:
        brelse(inode_bh);
        return ret;
  }
@@@ -3618,7 -3616,6 +3642,7 @@@ int ext4_can_truncate(struct inode *ino
  
  int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
  {
 +#if 0
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        ext4_lblk_t first_block, stop_block;
@@@ -3804,12 -3801,6 +3828,12 @@@ out_dio
  out_mutex:
        mutex_unlock(&inode->i_mutex);
        return ret;
 +#else
 +      /*
 +       * Disabled as per b/28760453
 +       */
 +      return -EOPNOTSUPP;
 +#endif
  }
  
  /*
@@@ -4072,10 -4063,6 +4096,10 @@@ make_io
                trace_ext4_load_inode(inode);
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
 +#ifdef FEATURE_STORAGE_META_LOG
 +              if( bh && bh->b_bdev && bh->b_bdev->bd_disk)
 +                      set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, WAIT_READ_CNT);
 +#endif
                submit_bh(READ | REQ_META | REQ_PRIO, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
diff --combined fs/ext4/mballoc.c
index aed1a67ab3a2bfedac039faadac4967d5d099a1f,61ee016039405673c4d3dab83ea7d92ffa97ab39..687ddf0f9423bba6b1bdf43e6430dddf0f26153d
@@@ -2707,8 -2707,7 +2707,8 @@@ int ext4_mb_release(struct super_block 
  }
  
  static inline int ext4_issue_discard(struct super_block *sb,
 -              ext4_group_t block_group, ext4_grpblk_t cluster, int count)
 +              ext4_group_t block_group, ext4_grpblk_t cluster, int count,
 +              unsigned long flags)
  {
        ext4_fsblk_t discard_block;
  
        count = EXT4_C2B(EXT4_SB(sb), count);
        trace_ext4_discard_blocks(sb,
                        (unsigned long long) discard_block, count);
 -      return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 +      return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
  }
  
  /*
@@@ -2739,7 -2738,7 +2739,7 @@@ static void ext4_free_data_callback(str
        if (test_opt(sb, DISCARD)) {
                err = ext4_issue_discard(sb, entry->efd_group,
                                         entry->efd_start_cluster,
 -                                       entry->efd_count);
 +                                       entry->efd_count, 0);
                if (err && err != -EOPNOTSUPP)
                        ext4_msg(sb, KERN_WARNING, "discard request in"
                                 " group:%d block:%d count:%d failed"
@@@ -4765,18 -4764,12 +4765,12 @@@ do_more
                /*
                 * blocks being freed are metadata. these blocks shouldn't
                 * be used until this transaction is committed
+                *
+                * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+                * to fail.
                 */
-       retry:
-               new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-               if (!new_entry) {
-                       /*
-                        * We use a retry loop because
-                        * ext4_free_blocks() is not allowed to fail.
-                        */
-                       cond_resched();
-                       congestion_wait(BLK_RW_ASYNC, HZ/50);
-                       goto retry;
-               }
+               new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+                               GFP_NOFS|__GFP_NOFAIL);
                new_entry->efd_start_cluster = bit;
                new_entry->efd_group = block_group;
                new_entry->efd_count = count_clusters;
                 * them with group lock_held
                 */
                if (test_opt(sb, DISCARD)) {
 -                      err = ext4_issue_discard(sb, block_group, bit, count);
 +                      err = ext4_issue_discard(sb, block_group, bit, count,
 +                                               0);
                        if (err && err != -EOPNOTSUPP)
                                ext4_msg(sb, KERN_WARNING, "discard request in"
                                         " group:%d block:%d count:%lu failed"
@@@ -4999,15 -4991,13 +4993,15 @@@ error_return
   * @count:    number of blocks to TRIM
   * @group:    alloc. group we are working with
   * @e4b:      ext4 buddy for the group
 + * @blkdev_flags: flags for the block device
   *
   * Trim "count" blocks starting at "start" in the "group". To assure that no
   * one will allocate those blocks, mark it as used in buddy bitmap. This must
   * be called with under the group lock.
   */
  static int ext4_trim_extent(struct super_block *sb, int start, int count,
 -                           ext4_group_t group, struct ext4_buddy *e4b)
 +                          ext4_group_t group, struct ext4_buddy *e4b,
 +                          unsigned long blkdev_flags)
  {
        struct ext4_free_extent ex;
        int ret = 0;
         */
        mb_mark_used(e4b, &ex);
        ext4_unlock_group(sb, group);
 -      ret = ext4_issue_discard(sb, group, start, count);
 +      ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
        ext4_lock_group(sb, group);
        mb_free_blocks(NULL, e4b, start, ex.fe_len);
        return ret;
   * @start:            first group block to examine
   * @max:              last group block to examine
   * @minblocks:                minimum extent block count
 + * @blkdev_flags:     flags for the block device
   *
   * ext4_trim_all_free walks through group's buddy bitmap searching for free
   * extents. When the free block is found, ext4_trim_extent is called to TRIM
  static ext4_grpblk_t
  ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
                   ext4_grpblk_t start, ext4_grpblk_t max,
 -                 ext4_grpblk_t minblocks)
 +                 ext4_grpblk_t minblocks, unsigned long blkdev_flags)
  {
        void *bitmap;
        ext4_grpblk_t next, count = 0, free_count = 0;
  
                if ((next - start) >= minblocks) {
                        ret = ext4_trim_extent(sb, start,
 -                                             next - start, group, &e4b);
 +                                             next - start, group, &e4b,
 +                                             blkdev_flags);
                        if (ret && ret != -EOPNOTSUPP)
                                break;
                        ret = 0;
@@@ -5130,7 -5118,6 +5124,7 @@@ out
   * ext4_trim_fs() -- trim ioctl handle function
   * @sb:                       superblock for filesystem
   * @range:            fstrim_range structure
 + * @blkdev_flags:     flags for the block device
   *
   * start:     First Byte to trim
   * len:               number of Bytes to trim from start
   * start to start+len. For each such a group ext4_trim_all_free function
   * is invoked to trim all free space.
   */
 -int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 +int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
 +                      unsigned long blkdev_flags)
  {
        struct ext4_group_info *grp;
        ext4_group_t group, first_group, last_group;
  
                if (grp->bb_free >= minlen) {
                        cnt = ext4_trim_all_free(sb, group, first_cluster,
 -                                              end, minlen);
 +                                              end, minlen, blkdev_flags);
                        if (cnt < 0) {
                                ret = cnt;
                                break;
diff --combined fs/ext4/super.c
index 0d81a73b79fc1eee835e583628ea330a90d31a2a,af1eaed96a9130992dc0aa1a10eb544b258735ce..b18a565ca02ab88497fd882aa65795aef805371f
@@@ -803,6 -803,7 +803,7 @@@ static void ext4_put_super(struct super
                dump_orphan_list(sb, sbi);
        J_ASSERT(list_empty(&sbi->s_orphan));
  
+       sync_blockdev(sb->s_bdev);
        invalidate_bdev(sb->s_bdev);
        if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
                /*
@@@ -2811,7 -2812,6 +2812,7 @@@ static int ext4_lazyinit_thread(void *a
        unsigned long next_wakeup, cur;
  
        BUG_ON(NULL == eli);
 +      set_freezable();
  
  cont_thread:
        while (true) {
  
                schedule_timeout_interruptible(next_wakeup - cur);
  
 -              if (kthread_should_stop()) {
 +              if (kthread_freezable_should_stop(NULL)) {
                        ext4_clear_request_list();
                        goto exit_thread;
                }
diff --combined fs/fuse/inode.c
index 04c0f355333329145a3b4b9974f2005f4d711aae,4d371f3b9a4536f48b09a10544b24b0c18531924..9bc50c30afc4b0a24fdf3f77cb2583e95e33d52e
@@@ -7,7 -7,7 +7,7 @@@
  */
  
  #include "fuse_i.h"
 -
 +#include "fuse.h"
  #include <linux/pagemap.h>
  #include <linux/slab.h>
  #include <linux/file.h>
@@@ -1028,6 -1028,7 +1028,7 @@@ static int fuse_fill_super(struct super
                goto err_fput;
  
        fuse_conn_init(fc);
+       fc->release = fuse_free_conn;
  
        fc->dev = sb->s_dev;
        fc->sb = sb;
                fc->dont_mask = 1;
        sb->s_flags |= MS_POSIXACL;
  
-       fc->release = fuse_free_conn;
        fc->flags = d.flags;
        fc->user_id = d.user_id;
        fc->group_id = d.group_id;
@@@ -1298,7 -1298,7 +1298,7 @@@ static int __init fuse_init(void
  
        sanitize_global_limit(&max_user_bgreq);
        sanitize_global_limit(&max_user_congthresh);
 -
 +      fuse_iolog_init();
        return 0;
  
   err_sysfs_cleanup:
@@@ -1319,7 -1319,6 +1319,7 @@@ static void __exit fuse_exit(void
        fuse_sysfs_cleanup();
        fuse_fs_cleanup();
        fuse_dev_cleanup();
 +      fuse_iolog_exit();
  }
  
  module_init(fuse_init);
diff --combined kernel/trace/trace.h
index 1d8c1cb606741b3f807d6882168252b4b53dd01d,fe576073580ab28d31c108d87426c5d1286944bc..ca0b8899082a0199f892267a87d54e72f1e2da3e
@@@ -12,9 -12,6 +12,9 @@@
  #include <linux/hw_breakpoint.h>
  #include <linux/trace_seq.h>
  #include <linux/ftrace_event.h>
 +#ifdef CONFIG_MT65XX_TRACER
 +#include <mach/mt_mon.h>
 +#endif
  
  #ifdef CONFIG_FTRACE_SYSCALLS
  #include <asm/unistd.h>               /* For NR_SYSCALLS           */
@@@ -38,7 -35,6 +38,7 @@@ enum trace_type 
        TRACE_USER_STACK,
        TRACE_BLK,
        TRACE_BPUTS,
 +    TRACE_MT65XX_MON_TYPE,
  
        __TRACE_LAST_TYPE,
  };
@@@ -292,8 -288,6 +292,8 @@@ extern void __ftrace_bad_type(void)
                          TRACE_GRAPH_ENT);             \
                IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
                          TRACE_GRAPH_RET);             \
 +        IF_ASSIGN(var, ent, struct mt65xx_mon_entry, \
 +                TRACE_MT65XX_MON_TYPE); \
                __ftrace_bad_type();                                    \
        } while (0)
  
@@@ -434,6 -428,7 +434,7 @@@ enum 
  
        TRACE_CONTROL_BIT,
  
+       TRACE_BRANCH_BIT,
  /*
   * Abuse of the trace_recursion.
   * As we need a way to maintain state if we are tracing the function
@@@ -659,7 -654,6 +660,7 @@@ static inline void __trace_stack(struc
  extern cycle_t ftrace_now(int cpu);
  
  extern void trace_find_cmdline(int pid, char comm[]);
 +extern int trace_find_tgid(int pid);
  
  #ifdef CONFIG_DYNAMIC_FTRACE
  extern unsigned long ftrace_update_tot_cnt;
@@@ -873,7 -867,6 +874,7 @@@ enum trace_iterator_flags 
        TRACE_ITER_IRQ_INFO             = 0x800000,
        TRACE_ITER_MARKERS              = 0x1000000,
        TRACE_ITER_FUNCTION             = 0x2000000,
 +      TRACE_ITER_TGID                 = 0x4000000,
  };
  
  /*
@@@ -1040,9 -1033,6 +1041,9 @@@ extern struct list_head ftrace_events
  extern const char *__start___trace_bprintk_fmt[];
  extern const char *__stop___trace_bprintk_fmt[];
  
 +extern const char *__start___tracepoint_str[];
 +extern const char *__stop___tracepoint_str[];
 +
  void trace_printk_init_buffers(void);
  void trace_printk_start_comm(void);
  int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);