Merge branch 'android-4.14-p' into android-exynos-4.14-ww-9610-minor_up-dev
author	Youngmin Nam <youngmin.nam@samsung.com>
Thu, 2 May 2019 06:37:01 +0000 (15:37 +0900)
committer	Youngmin Nam <youngmin.nam@samsung.com>
Thu, 2 May 2019 06:37:01 +0000 (15:37 +0900)
Change-Id: Ibb00d2fdd553354d3689f7d292a984b39001b9c3
Signed-off-by: Youngmin Nam <youngmin.nam@samsung.com>
141 files changed:
MAINTAINERS
Makefile
arch/Kconfig
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/Makefile
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/io.h
arch/arm64/include/asm/topology.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/smp.c
arch/arm64/kernel/traps.c
arch/arm64/mm/cache.S
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/fault.c
block/bio.c
block/blk-core.c
crypto/Kconfig
crypto/Makefile
crypto/testmgr.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/base/arch_topology.c
drivers/base/power/main.c
drivers/char/random.c
drivers/clocksource/exynos_mct.c
drivers/cpufreq/cpufreq.c
drivers/cpuidle/cpuidle.c
drivers/crypto/Kconfig
drivers/dma/at_hdmac.c
drivers/dma/bcm2835-dma.c
drivers/dma/imx-dma.c
drivers/dma/pl330.c
drivers/dma/tegra20-apb-dma.c
drivers/hid/hid-input.c
drivers/hid/uhid.c
drivers/i2c/i2c-core-base.c
drivers/iio/adc/exynos_adc.c
drivers/md/dm-crypt.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/mmc/core/mmc.c
drivers/mmc/host/Kconfig
drivers/mmc/host/dw_mmc.c
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/rtc/interface.c
drivers/scsi/hosts.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufshcd.c
drivers/spi/spi-s3c64xx.c
drivers/spi/spi.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_system_heap.c
drivers/thermal/Kconfig
drivers/thermal/of-thermal.c
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/thermal_core.c
drivers/tty/serial/samsung.c
drivers/tty/serial/serial_core.c
drivers/usb/common/common.c
drivers/usb/core/hub.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/gadget.h
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/phy/Kconfig
fs/buffer.c
fs/crypto/crypto.c
fs/debugfs/inode.c
fs/direct-io.c
fs/ext4/ext4.h
fs/ext4/inode.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/pnode.c
fs/pstore/ram_core.c
include/linux/arch_topology.h
include/linux/compiler-clang.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/cpuidle.h
include/linux/crypto.h
include/linux/mm.h
include/linux/nmi.h
include/linux/power_supply.h
include/linux/sched.h
include/linux/sched/sysctl.h
include/linux/serial_core.h
include/linux/tick.h
include/linux/usb.h
include/scsi/scsi_host.h
include/sound/compress_driver.h
include/trace/events/sched.h
init/Kconfig
kernel/cpu.c
kernel/exit.c
kernel/irq/chip.c
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/panic.c
kernel/printk/printk.c
kernel/rcu/tree.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched-pelt.h
kernel/sched/sched.h
kernel/sched/tune.c
kernel/softirq.c
kernel/time/hrtimer.c
kernel/time/tick-sched.c
kernel/time/timer.c
kernel/watchdog.c
kernel/workqueue.c
mm/gup.c
mm/kasan/kasan.c
mm/migrate.c
mm/page_alloc.c
mm/shmem.c
net/ipv6/ip6_output.c
net/netfilter/nf_conntrack_helper.c
net/wireless/reg.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
sound/core/pcm_lib.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c

diff --cc MAINTAINERS
Simple merge
diff --cc Makefile
Simple merge
diff --cc arch/Kconfig
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc arch/arm64/kernel/Makefile
index 7b0a86621e516b6cab0cf2feee161f95ec0a44bf,714fe90dbf66d44791d8a742d66f2411342dafb0..1ba2e33b7643d32454d370bf72477ca8c0455d53
@@@ -55,11 -54,8 +55,12 @@@ arm64-obj-$(CONFIG_KEXEC)           += machine_k
  arm64-obj-$(CONFIG_ARM64_RELOC_TEST)  += arm64-reloc-test.o
  arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
  arm64-obj-$(CONFIG_CRASH_DUMP)                += crash_dump.o
+ arm64-obj-$(CONFIG_ARM64_SSBD)                += ssbd.o
  
 +ifndef CONFIG_ARCH_EXYNOS
 +arm64-obj-y                                   += topology.o
 +endif
 +
  ifeq ($(CONFIG_KVM),y)
  arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)   += bpi.o
  endif
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc block/bio.c
Simple merge
Simple merge
diff --cc crypto/Kconfig
Simple merge
diff --cc crypto/Makefile
Simple merge
Simple merge
diff --cc drivers/android/binder.c
index 4344956cd59d9fd657b3af59b82942c440fae6f5,7a49ff6b5778f621909d8139028c7ee59ccdfb19..26ddc3b6eb33e022eadb23cb61b0e980bcd17fad
@@@ -3244,9 -3140,6 +3244,12 @@@ static void binder_transaction(struct b
                extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
        }
  
++<<<<<<< HEAD
 +#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
 +      dss_binder_transaction(reply, t, t->from ? t->from : thread, target_node ? target_node->debug_id : 0);
 +#endif
++=======
++>>>>>>> android-4.14-p
        trace_binder_transaction(reply, t, target_node);
  
        t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@@ -3787,13 -3670,13 +3790,23 @@@ static int binder_thread_write(struct b
                        if (IS_ERR_OR_NULL(buffer)) {
                                if (PTR_ERR(buffer) == -EPERM) {
                                        binder_user_error(
++<<<<<<< HEAD
 +                                              "%d:%d(%s:%s) BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
 +                                              proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
 +                                              (u64)data_ptr);
 +                              } else {
 +                                      binder_user_error(
 +                                              "%d:%d(%s:%s) BC_FREE_BUFFER u%016llx no match\n",
 +                                              proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
++=======
+                                               "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+                                               proc->pid, thread->pid,
+                                               (u64)data_ptr);
+                               } else {
+                                       binder_user_error(
+                                               "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+                                               proc->pid, thread->pid,
++>>>>>>> android-4.14-p
                                                (u64)data_ptr);
                                }
                                break;
@@@ -5108,8 -4975,8 +5121,13 @@@ static int binder_mmap(struct file *fil
        return 0;
  
  err_bad_arg:
++<<<<<<< HEAD
 +      pr_err("%s: %d(%s) %lx-%lx %s failed %d\n", __func__,
 +              proc->pid, proc->tsk->comm, vma->vm_start, vma->vm_end, failure_string, ret);
++=======
+       pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+              proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
++>>>>>>> android-4.14-p
        return ret;
  }
  
@@@ -5118,9 -4985,8 +5136,14 @@@ static int binder_open(struct inode *no
        struct binder_proc *proc;
        struct binder_device *binder_dev;
  
++<<<<<<< HEAD
 +      binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d (%s:%s)\n", __func__,
 +                   current->group_leader->pid, current->pid,
 +                   current->group_leader->comm, current->comm);
++=======
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
+                    current->group_leader->pid, current->pid);
++>>>>>>> android-4.14-p
  
        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (proc == NULL)
diff --cc drivers/android/binder_alloc.c
index d5419fe5ba4712cddcce2ef07dd583dd2fa14827,e00d4d13810a004d61821fe9ccec31651645be8b..01077da8681b3de27e17c6e104b547426820d5b7
@@@ -325,6 -325,34 +325,37 @@@ err_no_vma
        return vma ? -ENOMEM : -ESRCH;
  }
  
++<<<<<<< HEAD
++=======
+ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+               struct vm_area_struct *vma)
+ {
+       if (vma)
+               alloc->vma_vm_mm = vma->vm_mm;
+       /*
+        * If we see alloc->vma is not NULL, buffer data structures set up
+        * completely. Look at smp_rmb side binder_alloc_get_vma.
+        * We also want to guarantee new alloc->vma_vm_mm is always visible
+        * if alloc->vma is set.
+        */
+       smp_wmb();
+       alloc->vma = vma;
+ }
+ static inline struct vm_area_struct *binder_alloc_get_vma(
+               struct binder_alloc *alloc)
+ {
+       struct vm_area_struct *vma = NULL;
+       if (alloc->vma) {
+               /* Look at description in binder_alloc_set_vma */
+               smp_rmb();
+               vma = alloc->vma;
+       }
+       return vma;
+ }
++>>>>>>> android-4.14-p
  static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
@@@ -922,14 -948,14 +951,25 @@@ enum lru_status binder_alloc_free_page(
  
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
++<<<<<<< HEAD
 +
 +      mm = alloc->vma_vm_mm;
 +      if (!mmget_not_zero(mm))
 +              goto err_mmget;
 +      if (!down_write_trylock(&mm->mmap_sem))
 +              goto err_down_write_mmap_sem_failed;
 +
 +      vma = alloc->vma;
++=======
+       vma = binder_alloc_get_vma(alloc);
+       if (vma) {
+               if (!mmget_not_zero(alloc->vma_vm_mm))
+                       goto err_mmget;
+               mm = alloc->vma_vm_mm;
+               if (!down_write_trylock(&mm->mmap_sem))
+                       goto err_down_write_mmap_sem_failed;
+       }
++>>>>>>> android-4.14-p
  
        list_lru_isolate(lru, item);
        spin_unlock(lock);
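
For orientation, the android-4.14-p side of the binder_alloc.c hunk above introduces a binder_alloc_set_vma()/binder_alloc_get_vma() pair: the dependent alloc->vma_vm_mm store is made first, smp_wmb() orders it before the alloc->vma store, and a reader that observes a non-NULL vma issues smp_rmb() before relying on the mm field. The following is a minimal sketch of that publish/read barrier pattern with hypothetical names; it is not code from this tree.

	#include <linux/mm_types.h>	/* struct mm_struct, struct vm_area_struct */
	#include <asm/barrier.h>	/* smp_wmb(), smp_rmb() */

	struct vma_holder {
		struct mm_struct *mm;
		struct vm_area_struct *vma;
	};

	/* Writer: publish vma only after the field it implies (mm) is in place. */
	static void holder_set_vma(struct vma_holder *h, struct vm_area_struct *vma)
	{
		if (vma)
			h->mm = vma->vm_mm;
		smp_wmb();		/* order the mm store before the vma store */
		h->vma = vma;
	}

	/* Reader: if vma is seen, the paired barrier guarantees mm is seen too. */
	static struct vm_area_struct *holder_get_vma(struct vma_holder *h)
	{
		struct vm_area_struct *vma = NULL;

		if (h->vma) {
			smp_rmb();	/* pairs with smp_wmb() in holder_set_vma() */
			vma = h->vma;
		}
		return vma;
	}
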
Simple merge
diff --cc drivers/base/power/main.c
index a8d49f2e07596f529d7562d28302e6ee47686c3f,594581a0c973b78f20e09f8795ce7ae36fafc40b..885de2e96e6139e676438104f4aeeb63f79af0c9
@@@ -1483,9 -1482,8 +1485,10 @@@ static int __device_suspend(struct devi
                pm_get_active_wakeup_sources(suspend_abort,
                        MAX_SUSPEND_ABORT_LEN);
                log_suspend_abort_reason(suspend_abort);
+               dev->power.direct_complete = false;
                async_error = -EBUSY;
 +              if (dev->power.direct_complete)
 +                      dev->power.is_suspend_aborted = true;
                goto Complete;
        }
  
Simple merge
diff --cc drivers/clocksource/exynos_mct.c
index 27b41aae04fa316b76bee84a9a7e66b3e40cc834,d55c30f6981dcc377b7ad6a2bb76b2b37a182ce1..35e231137cc382a49fd3b9e661b19daa385037c2
@@@ -407,7 -410,8 +414,12 @@@ static int set_state_shutdown(struct cl
        struct mct_clock_event_device *mevt;
  
        mevt = container_of(evt, struct mct_clock_event_device, evt);
++<<<<<<< HEAD
 +      exynos4_mct_tick_stop(mevt, 1);
++=======
+       exynos4_mct_tick_stop(mevt);
+       exynos4_mct_tick_clear(mevt);
++>>>>>>> android-4.14-p
        return 0;
  }
  
@@@ -429,7 -433,15 +441,19 @@@ static irqreturn_t exynos4_mct_tick_isr
        struct mct_clock_event_device *mevt = dev_id;
        struct clock_event_device *evt = &mevt->evt;
  
++<<<<<<< HEAD
 +      exynos4_mct_tick_stop(mevt, 0);
++=======
+       /*
+        * This is for supporting oneshot mode.
+        * Mct would generate interrupt periodically
+        * without explicit stopping.
+        */
+       if (!clockevent_state_periodic(&mevt->evt))
+               exynos4_mct_tick_stop(mevt);
+       exynos4_mct_tick_clear(mevt);
++>>>>>>> android-4.14-p
  
        evt->event_handler(evt);
  
diff --cc drivers/cpufreq/cpufreq.c
index 51959aecf68e57a421508e2db527cc924565bfa8,971dcb871961a93b888b4cedd3e294713fb87e1d..88f77e6bb931d2acda86cf803f43a14a0f30a4a2
@@@ -556,13 -556,13 +556,21 @@@ EXPORT_SYMBOL_GPL(cpufreq_policy_transi
   *                          SYSFS INTERFACE                          *
   *********************************************************************/
  static ssize_t show_boost(struct kobject *kobj,
++<<<<<<< HEAD
 +                               struct kobj_attribute *attr, char *buf)
++=======
+                         struct kobj_attribute *attr, char *buf)
++>>>>>>> android-4.14-p
  {
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
  }
  
  static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
++<<<<<<< HEAD
 +                                const char *buf, size_t count)
++=======
+                          const char *buf, size_t count)
++>>>>>>> android-4.14-p
  {
        int ret, enable;
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc drivers/iio/adc/exynos_adc.c
index b279def5804af4f8a17070cd88e33cbe3e466b70,019153882e700732d98f4a53cfe09faf4622819b..b6148a0d4b33a031a6cc2f83dac6924e9195a411
@@@ -1097,9 -915,8 +1097,9 @@@ static int exynos_adc_remove(struct pla
  {
        struct iio_dev *indio_dev = platform_get_drvdata(pdev);
        struct exynos_adc *info = iio_priv(indio_dev);
 +      int ret;
  
-       if (IS_REACHABLE(CONFIG_INPUT)) {
+       if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
                free_irq(info->tsirq, info);
                input_unregister_device(info->input);
        }
diff --cc drivers/md/dm-crypt.c
index b60adf57e8dd53569d272fa9e0045c6418e96de5,94b8d81f6020a83b462766bc3c8378a86c28f92b..834d9eb67b2861ec33dc4b357ba3e8293300ff3f
@@@ -2550,14 -2497,8 +2562,19 @@@ static int crypt_ctr_cipher_old(struct 
                goto bad_mem;
  
        chainmode = strsep(&tmp, "-");
++<<<<<<< HEAD
 +      *ivopts = strsep(&tmp, "-");
 +      *ivmode = strsep(&*ivopts, ":");
 +
 +      if (*ivmode && (!strcmp(*ivmode, "disk") || !strcmp(*ivmode, "fmp")))
 +              set_bit(CRYPT_MODE_DISKCIPHER, &cc->cipher_flags);
 +
 +      if (tmp)
 +              DMWARN("Ignoring unexpected additional cipher options");
++=======
+       *ivmode = strsep(&tmp, ":");
+       *ivopts = tmp;
++>>>>>>> android-4.14-p
  
        /*
         * For compatibility with the original dm-crypt mapping format, if
@@@ -3166,14 -3081,11 +3183,22 @@@ static void crypt_io_hints(struct dm_ta
         */
        limits->max_segment_size = PAGE_SIZE;
  
++<<<<<<< HEAD
 +      if (cc->sector_size != (1 << SECTOR_SHIFT)) {
 +              limits->logical_block_size = cc->sector_size;
 +              limits->physical_block_size = cc->sector_size;
 +              blk_limits_io_min(limits, cc->sector_size);
 +      }
 +
 +      if (crypt_mode_diskcipher(cc))
 +              limits->logical_block_size = PAGE_SIZE;
++=======
+       limits->logical_block_size =
+               max_t(unsigned short, limits->logical_block_size, cc->sector_size);
+       limits->physical_block_size =
+               max_t(unsigned, limits->physical_block_size, cc->sector_size);
+       limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
++>>>>>>> android-4.14-p
  }
  
  static struct target_type crypt_target = {
diff --cc drivers/media/v4l2-core/videobuf2-core.c
index 2ebbf42314af6b88fd609b34bb2973b46c5f0566,f1725da2a90d6ced4676ac613825a6f9b154dd76..2da70aafa3d90179fcebd0dfdeb745a87e190485
@@@ -917,8 -916,7 +917,12 @@@ static void vb2_process_buffer_done(str
            state != VB2_BUF_STATE_REQUEUEING) {
                /* sync buffers */
                for (plane = 0; plane < vb->num_planes; ++plane)
++<<<<<<< HEAD
 +                      call_void_memop(vb, finish, vb->planes[plane].mem_priv,
 +                                      vb->planes[plane].bytesused, memflags);
++=======
+                       call_void_memop(vb, finish, vb->planes[plane].mem_priv);
++>>>>>>> android-4.14-p
        }
  
        spin_lock_irqsave(&q->done_lock, flags);
@@@ -1432,100 -1368,16 +1436,105 @@@ static int vb2_start_streaming(struct v
        return ret;
  }
  
 -int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
 +static void __qbuf_work(struct work_struct *work)
  {
        struct vb2_buffer *vb;
 +      struct vb2_queue *q;
 +
 +      vb = container_of(work, struct vb2_buffer, qbuf_work);
 +      q = vb->vb2_queue;
 +
 +      if (q->start_streaming_called)
 +              __enqueue_in_driver(vb);
 +}
 +
 +static void vb2_qbuf_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 +{
 +      struct vb2_buffer *vb = container_of(cb, struct vb2_buffer, fence_cb);
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&vb->fence_cb_lock, flags);
 +      del_timer(&vb->fence_timer);
 +      if (!vb->in_fence) {
 +              spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
 +              return;
 +      }
 +      /*
 +       * If the fence signals with an error we mark the buffer as such
 +       * and avoid using it by setting it to VB2_BUF_STATE_ERROR and
 +       * not queueing it to the driver. However we can't notify the error
 +       * to userspace right now because, at the time this callback run, QBUF
 +       * returned already.
 +       * So we delay that to DQBUF time. See comments in vb2_buffer_done()
 +       * as well.
 +       */
 +      if (vb->in_fence->error)
 +              vb->state = VB2_BUF_STATE_ERROR;
 +
 +      dma_fence_put(vb->in_fence);
 +      vb->in_fence = NULL;
 +
 +      if (vb->state == VB2_BUF_STATE_ERROR) {
 +              spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
 +              return;
 +      }
 +      spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
 +
 +      schedule_work(&vb->qbuf_work);
 +}
 +
 +#define VB2_FENCE_TIMEOUT             (1000)
 +static void vb2_fence_timeout_handler(unsigned long arg)
 +{
 +      struct vb2_buffer *vb = (struct vb2_buffer *)arg;
 +      struct dma_fence *fence;
 +      unsigned long flags;
 +      char name[32];
 +
 +      pr_err("%s: fence callback is not called during %d ms\n",
 +                                      __func__, VB2_FENCE_TIMEOUT);
 +      spin_lock_irqsave(&vb->fence_cb_lock, flags);
 +      if (!vb->in_fence) {
 +              spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
 +              return;
 +      }
 +
 +      fence = vb->in_fence;
 +      if (fence) {
 +              strlcpy(name, fence->ops->get_driver_name(fence),
 +                              sizeof(name));
 +              pr_err("%s: vb2 in-fence: %s #%d (%s), error: %d\n",
 +                              __func__, name, fence->seqno,
 +                              dma_fence_is_signaled(fence) ?
 +                              "signaled" : "active", fence->error);
 +
 +              dma_fence_remove_callback(vb->in_fence, &vb->fence_cb);
 +              dma_fence_put(fence);
 +              vb->in_fence = NULL;
 +              vb->state = VB2_BUF_STATE_ERROR;
 +      }
 +
 +      fence = vb->out_fence;
 +      if (fence)
 +              pr_err("%s: vb2 out-fence: #%d\n", __func__, fence->seqno);
 +
 +      spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
 +
 +      schedule_work(&vb->qbuf_work);
 +}
 +
 +int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
 +                struct dma_fence *in_fence)
 +{
 +      struct vb2_buffer *vb;
 +      unsigned long flags;
        int ret;
  
+       if (q->error) {
+               dprintk(1, "fatal error occurred on queue\n");
+               return -EIO;
+       }
        vb = q->bufs[index];
  
        switch (vb->state) {
diff --cc drivers/media/v4l2-core/videobuf2-v4l2.c
index 57813c9bcd7191fa8be4ad127d2ea043608f4a70,69ca8debb711aad1f73b3516b34cdfff8fd852d4..c20a35cdfc0608942e1faa215b56bc52e0fb3bf6
@@@ -146,7 -145,6 +146,10 @@@ static void vb2_warn_zero_bytesused(str
                return;
  
        check_once = true;
++<<<<<<< HEAD
 +      /* WARN_ON(1); */
++=======
++>>>>>>> android-4.14-p
  
        pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
        if (vb->vb2_queue->allow_zero_bytesused)
Simple merge
Simple merge
diff --cc drivers/mmc/host/dw_mmc.c
index da5ebd57cde58090795d750cda4b0ca94935270d,5252885e5cda5e87c9e3e49ce66bacf823e033b1..b99ad0fe5f38196526185e2b212062dd2343f54f
@@@ -1652,6 -1251,12 +1652,15 @@@ static void dw_mci_setup_bus(struct dw_
        u32 clk_en_a;
        u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
  
++<<<<<<< HEAD
++=======
+       /* We must continue to set bit 28 in CMD until the change is complete */
+       if (host->state == STATE_WAITING_CMD11_DONE)
+               sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
+       slot->mmc->actual_clock = 0;
++>>>>>>> android-4.14-p
        if (!clock) {
                mci_writel(host, CLKENA, 0);
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc drivers/scsi/sd.c
index 05efa9e136a77b0fb6c1f7a6ff039b0e6f85fdab,e0c0fea227c1628c0ae5223a9be07c64c3ea5372..d55029a0eb98082786f0f99c9b9b18333da146ee
@@@ -3138,18 -3193,10 +3198,25 @@@ static int sd_revalidate_disk(struct ge
        dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
        q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
  
++<<<<<<< HEAD
 +      /*
 +       * Determine the device's preferred I/O size for reads and writes
 +       * unless the reported value is unreasonably small, large, or
 +       * garbage.
 +       */
 +      if (sdkp->opt_xfer_blocks &&
 +          sdkp->opt_xfer_blocks <= dev_max &&
 +          sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
 +              sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
 +              rw_max = q->limits.io_opt =
 +                      sdkp->opt_xfer_blocks * sdp->sector_size;
 +      else
++=======
+       if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
+               q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+               rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+       } else
++>>>>>>> android-4.14-p
                rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
                                      (sector_t)BLK_DEF_MAX_SECTORS);
  
diff --cc drivers/scsi/ufs/ufshcd.c
index e91686da49ea2853d4e95e1ce5fdc1445b045e0f,581571de24614df97fdb2ed959af02576f8b63ef..4b7d944c516dbdd86c71bb608a05fa3bedb5b7b1
@@@ -7499,7 -6765,16 +7501,20 @@@ static int __ufshcd_setup_clocks(struc
        if (list_empty(head))
                goto out;
  
++<<<<<<< HEAD
 +      ufshcd_vops_pre_setup_clocks(hba, on);
++=======
+       /*
+        * vendor specific setup_clocks ops may depend on clocks managed by
+        * this standard driver hence call the vendor specific setup_clocks
+        * before disabling the clocks managed here.
+        */
+       if (!on) {
+               ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+               if (ret)
+                       return ret;
+       }
++>>>>>>> android-4.14-p
  
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                }
        }
  
++<<<<<<< HEAD
 +      ret = ufshcd_vops_setup_clocks(hba, on);
++=======
+       /*
+        * vendor specific setup_clocks ops may depend on clocks managed by
+        * this standard driver hence call the vendor specific setup_clocks
+        * after enabling the clocks managed here.
+        */
+       if (on) {
+               ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+               if (ret)
+                       return ret;
+       }
++>>>>>>> android-4.14-p
  
  out:
        if (ret) {
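
The android-4.14-p side of the __ufshcd_setup_clocks() hunks above replaces the single vendor hook with PRE_CHANGE/POST_CHANGE notifications placed so that the vendor callback only runs while the clocks managed by the standard driver are enabled: before gating them on the way down, and after ungating them on the way up. A rough sketch of that ordering follows; the names and types are hypothetical, not the driver's actual API.

	enum hook_stage { PRE_CHANGE_STAGE, POST_CHANGE_STAGE };	/* hypothetical */

	struct my_hba;							/* opaque for the sketch */
	int my_vendor_setup_clocks(struct my_hba *hba, bool on, enum hook_stage stage);
	void my_toggle_core_clocks(struct my_hba *hba, bool on);

	static int my_setup_clocks_ordered(struct my_hba *hba, bool on)
	{
		int ret = 0;

		if (!on) {
			/* about to gate the clocks: run the vendor hook first */
			ret = my_vendor_setup_clocks(hba, on, PRE_CHANGE_STAGE);
			if (ret)
				return ret;
		}

		my_toggle_core_clocks(hba, on);

		if (on) {
			/* clocks are running again: the vendor hook is safe now */
			ret = my_vendor_setup_clocks(hba, on, POST_CHANGE_STAGE);
		}

		return ret;
	}
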
diff --cc drivers/spi/spi-s3c64xx.c
index 754e2e337913011c9557903f4714fd2039e7b105,1a6ec226d6e46b36bc36051d3229b49e6441cb03..c640da6efec6be9e4e3c3026340cee7e000de50c
@@@ -1992,49 -1266,19 +1992,55 @@@ static int s3c64xx_spi_resume_operation
        struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
        int ret;
  
 -      if (sci->cfg_gpio)
 -              sci->cfg_gpio();
 +      if (!pm_runtime_status_suspended(dev))
 +              s3c64xx_spi_runtime_resume(dev);
  
 -      ret = pm_runtime_force_resume(dev);
 -      if (ret < 0)
 -              return ret;
 +      if (sci->domain == DOMAIN_TOP) {
 +
++<<<<<<< HEAD
 +              /* Enable the clock */
 +#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
 +              exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
 +#endif
 +              clk_prepare_enable(sdd->src_clk);
 +              clk_prepare_enable(sdd->clk);
 +
 +              if (sci->cfg_gpio)
 +                      sci->cfg_gpio();
  
 +              if (sci->secure_mode)
 +                      sci->need_hw_init = 1;
 +              else {
 +                      exynos_usi_init(sdd);
 +                      s3c64xx_spi_hwinit(sdd, sdd->port_id);
 +              }
++=======
+       return spi_master_resume(master);
+ }
+ #endif /* CONFIG_PM_SLEEP */
++>>>>>>> android-4.14-p
  
  #ifdef CONFIG_PM
 -static int s3c64xx_spi_runtime_suspend(struct device *dev)
 +              /* Disable the clock */
 +              clk_disable_unprepare(sdd->src_clk);
 +              clk_disable_unprepare(sdd->clk);
 +#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
 +              exynos_update_ip_idle_status(sdd->idle_ip_index, 1);
 +#endif
 +#endif
 +      }
 +
 +      /* Start the queue running */
 +      ret = spi_master_resume(master);
 +      if (ret)
 +              dev_err(dev, "problem starting queue (%d)\n", ret);
 +      else
 +              dev_dbg(dev, "resumed\n");
 +
 +      return ret;
 +}
 +
 +static int s3c64xx_spi_suspend(struct device *dev)
  {
        struct spi_master *master = dev_get_drvdata(dev);
        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
@@@ -2052,56 -1294,34 +2058,62 @@@ static int s3c64xx_spi_suspend_noirq(st
  {
        struct spi_master *master = dev_get_drvdata(dev);
        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 -      int ret;
 +      struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
  
 -      if (sdd->port_conf->clk_ioclk) {
 -              ret = clk_prepare_enable(sdd->ioclk);
 -              if (ret != 0)
 -                      return ret;
 -      }
 +      if (sci->dma_mode == DMA_MODE)
 +              return 0;
  
 -      ret = clk_prepare_enable(sdd->src_clk);
 -      if (ret != 0)
 -              goto err_disable_ioclk;
 +      dev_dbg(dev, "spi suspend is handled in suspend_noirq, dma mode = %d\n",
 +                      sci->dma_mode);
 +      return s3c64xx_spi_suspend_operation(dev);
 +}
  
 -      ret = clk_prepare_enable(sdd->clk);
 -      if (ret != 0)
 -              goto err_disable_src_clk;
 +static int s3c64xx_spi_resume(struct device *dev)
 +{
 +      struct spi_master *master = dev_get_drvdata(dev);
 +      struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 +      struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
  
++<<<<<<< HEAD
 +      if (sci->dma_mode != DMA_MODE)
 +              return 0;
++=======
+       s3c64xx_spi_hwinit(sdd, sdd->port_id);
+       return 0;
++>>>>>>> android-4.14-p
  
 -err_disable_src_clk:
 -      clk_disable_unprepare(sdd->src_clk);
 -err_disable_ioclk:
 -      clk_disable_unprepare(sdd->ioclk);
 +      dev_dbg(dev, "spi resume is handled in device resume, dma mode = %d\n",
 +                      sci->dma_mode);
  
 -      return ret;
 +      return s3c64xx_spi_resume_operation(dev);
  }
 -#endif /* CONFIG_PM */
 +
 +static int s3c64xx_spi_resume_noirq(struct device *dev)
 +{
 +      struct spi_master *master = dev_get_drvdata(dev);
 +      struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 +      struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 +
 +      if (sci->dma_mode == DMA_MODE)
 +              return 0;
 +
 +      dev_dbg(dev, "spi resume is handled in resume_noirq, dma mode = %d\n",
 +                      sci->dma_mode);
 +
 +      return s3c64xx_spi_resume_operation(dev);
 +}
 +#else
 +static int s3c64xx_spi_suspend(struct device *dev)
 +{
 +      return 0;
 +}
 +
 +static int s3c64xx_spi_resume(struct device *dev)
 +{
 +      return 0;
 +}
 +#endif /* CONFIG_PM_SLEEP */
  
  static const struct dev_pm_ops s3c64xx_spi_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
Simple merge
diff --cc drivers/staging/android/ion/ion.c
index a61ad765a540b5b1b084bf158de261c91238ca93,dd96ca61a5152cb0cdac912ddb4e5cba9273bdd1..061a079ae9acaae02e5f01bb150f4102360d4d36
@@@ -254,7 -254,15 +254,19 @@@ static int ion_dma_buf_attach(struct dm
  static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
                                struct dma_buf_attachment *attachment)
  {
++<<<<<<< HEAD
 +      free_duped_table(attachment->priv);
++=======
+       struct ion_dma_buf_attachment *a = attachment->priv;
+       struct ion_buffer *buffer = dmabuf->priv;
+       mutex_lock(&buffer->lock);
+       list_del(&a->list);
+       mutex_unlock(&buffer->lock);
+       free_duped_table(a->table);
+       kfree(a);
++>>>>>>> android-4.14-p
  }
  
  static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
diff --cc drivers/staging/android/ion/ion_system_heap.c
index b0b5322342cf23a2180a608ddc7ac5bb9f05fe14,e64db42aeb1edf17e231c6ae11e797d2e26a7061..2e5f26ae263dd70537f5afced1996988384e50dd
@@@ -323,13 -298,13 +323,17 @@@ static int ion_system_heap_create_pools
                                        bool cached)
  {
        int i;
++<<<<<<< HEAD
 +      gfp_t gfp_flags = high_order_gfp_flags;
++=======
++>>>>>>> android-4.14-p
  
        for (i = 0; i < NUM_ORDERS; i++) {
                struct ion_page_pool *pool;
+               gfp_t gfp_flags = low_order_gfp_flags;
  
 -              if (orders[i] > 4)
 -                      gfp_flags = high_order_gfp_flags;
 +              if (orders[i] < 4)
 +                      gfp_flags = low_order_gfp_flags;
  
                pool = ion_page_pool_create(gfp_flags, orders[i], cached);
                if (!pool)
Simple merge
Simple merge
diff --cc drivers/thermal/samsung/exynos_tmu.c
index 4c3624e4fb3efbf9fbba414019412da62c71ccaf,d60069b5dc98dee7bb4ccf82f11c057fc9519a11..3652004813e5854799762c0a67148f6d2201ef00
@@@ -359,185 -409,208 +359,192 @@@ static int exynos9810_tmu_initialize(st
  {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
        struct thermal_zone_device *tz = data->tzd;
 -      const struct thermal_trip * const trips =
 -              of_thermal_get_trip_points(tz);
 -      int ret = 0, threshold_code, i;
 -      unsigned long reference, temp;
 -      unsigned int status;
 -
 -      if (!trips) {
 -              pr_err("%s: Cannot get trip points from of-thermal.c!\n",
 -                     __func__);
 -              ret = -ENODEV;
 -              goto out;
 -      }
 -
 -      status = readb(data->base + EXYNOS_TMU_REG_STATUS);
 -      if (!status) {
 -              ret = -EBUSY;
 -              goto out;
 -      }
 -
 -      sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
 -
 -      /* Write temperature code for threshold */
 -      reference = trips[0].temperature / MCELSIUS;
 -      threshold_code = temp_to_code(data, reference);
 -      if (threshold_code < 0) {
 -              ret = threshold_code;
 -              goto out;
 -      }
 -      writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
 +      struct exynos_tmu_platform_data *pdata = data->pdata;
 +      unsigned int trim_info, temp_error1, temp_error2;
 +      unsigned short cal_type;
 +      unsigned int rising_threshold, falling_threshold;
 +      unsigned int reg_off, bit_off;
 +      enum thermal_trip_type type;
 +      int temp, temp_hist, threshold_code;
 +      int i, sensor, count = 0, interrupt_count;
  
 -      for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
 -              temp = trips[i].temperature / MCELSIUS;
 -              writeb(temp - reference, data->base +
 -                     EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
 -      }
 +      trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO(0));
 +      cal_type = (trim_info >> EXYNOS_TMU_CALIB_SEL_SHIFT) & EXYNOS_TMU_CALIB_SEL_MASK;
  
 -      data->tmu_clear_irqs(data);
 -out:
 -      return ret;
 -}
 +      for (sensor = 0; sensor < TOTAL_SENSORS; sensor++) {
  
 -static int exynos4412_tmu_initialize(struct platform_device *pdev)
 -{
 -      struct exynos_tmu_data *data = platform_get_drvdata(pdev);
 -      const struct thermal_trip * const trips =
 -              of_thermal_get_trip_points(data->tzd);
 -      unsigned int status, trim_info, con, ctrl, rising_threshold;
 -      int ret = 0, threshold_code, i;
 -      unsigned long crit_temp = 0;
 -
 -      status = readb(data->base + EXYNOS_TMU_REG_STATUS);
 -      if (!status) {
 -              ret = -EBUSY;
 -              goto out;
 -      }
 +              if (!(data->sensors & (1 << sensor)))
 +                      continue;
  
 -      if (data->soc == SOC_ARCH_EXYNOS3250 ||
 -          data->soc == SOC_ARCH_EXYNOS4412 ||
 -          data->soc == SOC_ARCH_EXYNOS5250) {
 -              if (data->soc == SOC_ARCH_EXYNOS3250) {
 -                      ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
 -                      ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
 -                      writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
 -              }
 -              ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
 -              ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
 -              writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
 -      }
 +              /* Read the sensor error value from TRIMINFOX */
 +              trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO(sensor));
 +              temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
 +              temp_error2 = (trim_info >> EXYNOS_TMU_TRIMINFO_85_P0_SHIFT) & EXYNOS_TMU_TEMP_MASK;
  
 -      /* On exynos5420 the triminfo register is in the shared space */
 -      if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
 -              trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
 -      else
 -              trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
 +              /* Save sensor id */
 +              data->sensor_info[count].sensor_num = sensor;
 +              dev_info(&pdev->dev, "Sensor number = %d\n", sensor);
  
 -      sanitize_temp_error(data, trim_info);
 +              /* Check thermal calibration type */
 +              data->sensor_info[count].cal_type = cal_type;
  
 -      /* Write temperature code for rising and falling threshold */
 -      rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
 -      rising_threshold = get_th_reg(data, rising_threshold, false);
 -      writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
 -      writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);
 +              /* Check temp_error1 value */
 +              data->sensor_info[count].temp_error1 = temp_error1;
 +              if (!data->sensor_info[count].temp_error1)
 +                      data->sensor_info[count].temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
  
 -      data->tmu_clear_irqs(data);
 +              /* Check temp_error2 if calibration type is TYPE_TWO_POINT_TRIMMING */
 +              if (data->sensor_info[count].cal_type == TYPE_TWO_POINT_TRIMMING) {
 +                      data->sensor_info[count].temp_error2 = temp_error2;
  
 -      /* if last threshold limit is also present */
 -      for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
 -              if (trips[i].type == THERMAL_TRIP_CRITICAL) {
 -                      crit_temp = trips[i].temperature;
 -                      break;
 +                      if (!data->sensor_info[count].temp_error2)
 +                              data->sensor_info[count].temp_error2 =
 +                                      (pdata->efuse_value >> EXYNOS_TMU_TRIMINFO_85_P0_SHIFT) &
 +                                      EXYNOS_TMU_TEMP_MASK;
                }
 -      }
  
 -      if (i == of_thermal_get_ntrips(data->tzd)) {
 -              pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
 -                     __func__);
 -              ret = -EINVAL;
 -              goto out;
 +              interrupt_count = 0;
 +              /* Write temperature code for rising and falling threshold */
 +              for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
 +                      tz->ops->get_trip_type(tz, i, &type);
 +
 +                      if (type == THERMAL_TRIP_PASSIVE)
 +                              continue;
 +
 +                      reg_off = (interrupt_count / 2) * 4;
 +                      bit_off = ((interrupt_count + 1) % 2) * 16;
 +
 +                      if (sensor == 0)
 +                              reg_off += EXYNOS_TMU_REG_THD_TEMP0;
 +                      else if (sensor < 8)
 +                              reg_off += EXYNOS_TMU_REG_THD_TEMP1 + (sensor - 1) * 0x20;
 +                      else
 +                              reg_off += EXYNOS_TMU_REG_THD_TEMP8 + (sensor - 8) * 0x20;
 +
 +                      tz->ops->get_trip_temp(tz, i, &temp);
 +                      temp /= MCELSIUS;
 +
 +                      tz->ops->get_trip_hyst(tz, i, &temp_hist);
 +                      temp_hist = temp - (temp_hist / MCELSIUS);
 +
 +                      /* Set 9-bit temperature code for rising threshold levels */
 +                      threshold_code = temp_to_code_with_sensorinfo(data, temp, &data->sensor_info[count]);
 +                      rising_threshold = readl(data->base + reg_off);
 +                      rising_threshold &= ~(EXYNOS_TMU_TEMP_MASK << bit_off);
 +                      rising_threshold |= threshold_code << bit_off;
 +                      writel(rising_threshold, data->base + reg_off);
 +
 +                      /* Set 9-bit temperature code for falling threshold levels */
 +                      threshold_code = temp_to_code_with_sensorinfo(data, temp_hist, &data->sensor_info[count]);
 +                      falling_threshold = readl(data->base + reg_off + 0x10);
 +                      falling_threshold &= ~(EXYNOS_TMU_TEMP_MASK << bit_off);
 +                      falling_threshold |= threshold_code << bit_off;
 +                      writel(falling_threshold, data->base + reg_off + 0x10);
 +                      interrupt_count++;
 +              }
 +              count++;
        }
  
 -      threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
 -      /* 1-4 level to be assigned in th0 reg */
 -      rising_threshold &= ~(0xff << 8 * i);
 -      rising_threshold |= threshold_code << 8 * i;
 -      writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
 -      con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
 -      con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
 -      writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
 +      data->tmu_clear_irqs(data);
  
 -out:
 -      return ret;
 +      return 0;
  }
  
 -static int exynos5433_tmu_initialize(struct platform_device *pdev)
 +static int exynos9610_tmu_initialize(struct platform_device *pdev)
  {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
 -      struct exynos_tmu_platform_data *pdata = data->pdata;
        struct thermal_zone_device *tz = data->tzd;
 -      unsigned int status, trim_info;
 -      unsigned int rising_threshold = 0, falling_threshold = 0;
 -      int temp, temp_hist;
 -      int ret = 0, threshold_code, i, sensor_id, cal_type;
 -
 -      status = readb(data->base + EXYNOS_TMU_REG_STATUS);
 -      if (!status) {
 -              ret = -EBUSY;
 -              goto out;
 -      }
 +      struct exynos_tmu_platform_data *pdata = data->pdata;
 +      unsigned int trim_info, temp_error1, temp_error2;
 +      unsigned short cal_type;
 +      unsigned int rising_threshold, falling_threshold;
 +      unsigned int reg_off, bit_off;
 +      enum thermal_trip_type type;
 +      int temp, temp_hist, threshold_code;
 +      int i, sensor, count = 0, interrupt_count;
  
 -      trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
 -      sanitize_temp_error(data, trim_info);
 +      trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO(0));
 +      cal_type = (trim_info >> EXYNOS_TMU_CALIB_SEL_SHIFT) & EXYNOS_TMU_CALIB_SEL_MASK;
  
 -      /* Read the temperature sensor id */
 -      sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
 -                              >> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
 -      dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);
 +      for (sensor = 0; sensor < TOTAL_SENSORS; sensor++) {
  
 -      /* Read the calibration mode */
 -      writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
 -      cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
 -                              >> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;
 +              if (!(data->sensors & (1 << sensor)))
 +                      continue;
  
 -      switch (cal_type) {
 -      case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
 -              pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
 -              break;
 -      case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
 -              pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
 -              break;
 -      default:
 -              pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
 -              break;
 -      }
 +              /* Read the sensor error value from TRIMINFOX */
 +              trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO(sensor));
 +              temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
 +              temp_error2 = (trim_info >> EXYNOS_TMU_TRIMINFO_85_P0_SHIFT) & EXYNOS_TMU_TEMP_MASK;
  
 -      dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
 -                      cal_type ?  2 : 1);
 -
 -      /* Write temperature code for rising and falling threshold */
 -      for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
 -              int rising_reg_offset, falling_reg_offset;
 -              int j = 0;
 -
 -              switch (i) {
 -              case 0:
 -              case 1:
 -              case 2:
 -              case 3:
 -                      rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
 -                      falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
 -                      j = i;
 -                      break;
 -              case 4:
 -              case 5:
 -              case 6:
 -              case 7:
 -                      rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
 -                      falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
 -                      j = i - 4;
 -                      break;
 -              default:
 -                      continue;
 -              }
 +              /* Save sensor id */
 +              data->sensor_info[count].sensor_num = sensor;
 +              dev_info(&pdev->dev, "Sensor number = %d\n", sensor);
  
 -              /* Write temperature code for rising threshold */
 -              tz->ops->get_trip_temp(tz, i, &temp);
 -              temp /= MCELSIUS;
 -              threshold_code = temp_to_code(data, temp);
 +              /* Check thermal calibration type */
 +              data->sensor_info[count].cal_type = cal_type;
 +
 +              /* Check temp_error1 value */
 +              data->sensor_info[count].temp_error1 = temp_error1;
 +              if (!data->sensor_info[count].temp_error1)
 +                      data->sensor_info[count].temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
  
++<<<<<<< HEAD
 +              /* Check temp_error2 if calibration type is TYPE_TWO_POINT_TRIMMING */
 +              if (data->sensor_info[count].cal_type == TYPE_TWO_POINT_TRIMMING) {
 +                      data->sensor_info[count].temp_error2 = temp_error2;
++=======
+               rising_threshold = readl(data->base + rising_reg_offset);
+               rising_threshold &= ~(0xff << j * 8);
+               rising_threshold |= (threshold_code << j * 8);
+               writel(rising_threshold, data->base + rising_reg_offset);
++>>>>>>> android-4.14-p
  
 -              /* Write temperature code for falling threshold */
 -              tz->ops->get_trip_hyst(tz, i, &temp_hist);
 -              temp_hist = temp - (temp_hist / MCELSIUS);
 -              threshold_code = temp_to_code(data, temp_hist);
 +                      if (!data->sensor_info[count].temp_error2)
 +                              data->sensor_info[count].temp_error2 =
 +                                      (pdata->efuse_value >> EXYNOS_TMU_TRIMINFO_85_P0_SHIFT) &
 +                                      EXYNOS_TMU_TEMP_MASK;
 +              }
  
 -              falling_threshold = readl(data->base + falling_reg_offset);
 -              falling_threshold &= ~(0xff << j * 8);
 -              falling_threshold |= (threshold_code << j * 8);
 -              writel(falling_threshold, data->base + falling_reg_offset);
 +              interrupt_count = 0;
 +              /* Write temperature code for rising and falling threshold */
 +              for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
 +                      tz->ops->get_trip_type(tz, i, &type);
 +
 +                      if (type == THERMAL_TRIP_PASSIVE)
 +                              continue;
 +
 +                      reg_off = (interrupt_count / 2) * 4;
 +                      bit_off = ((interrupt_count + 1) % 2) * 16;
 +
 +                      if (sensor == 0)
 +                              reg_off += EXYNOS_TMU_REG_THD_TEMP0;
 +                      else if (sensor < 8)
 +                              reg_off += EXYNOS_TMU_REG_THD_TEMP1 + (sensor - 1) * 0x20;
 +                      else
 +                              reg_off += EXYNOS_TMU_REG_THD_TEMP8 + (sensor - 8) * 0x20;
 +
 +                      tz->ops->get_trip_temp(tz, i, &temp);
 +                      temp /= MCELSIUS;
 +
 +                      tz->ops->get_trip_hyst(tz, i, &temp_hist);
 +                      temp_hist = temp - (temp_hist / MCELSIUS);
 +
 +                      /* Set 9-bit temperature code for rising threshold levels */
 +                      threshold_code = temp_to_code_with_sensorinfo(data, temp, &data->sensor_info[count]);
 +                      rising_threshold = readl(data->base + reg_off);
 +                      rising_threshold &= ~(EXYNOS_TMU_TEMP_MASK << bit_off);
 +                      rising_threshold |= threshold_code << bit_off;
 +                      writel(rising_threshold, data->base + reg_off);
 +
 +                      /* Set 9-bit temperature code for falling threshold levels */
 +                      threshold_code = temp_to_code_with_sensorinfo(data, temp_hist, &data->sensor_info[count]);
 +                      falling_threshold = readl(data->base + reg_off + 0x10);
 +                      falling_threshold &= ~(EXYNOS_TMU_TEMP_MASK << bit_off);
 +                      falling_threshold |= threshold_code << bit_off;
 +                      writel(falling_threshold, data->base + reg_off + 0x10);
 +                      interrupt_count++;
 +              }
 +              count++;
        }
  
        data->tmu_clear_irqs(data);
Simple merge
diff --cc drivers/tty/serial/samsung.c
index b369e26aa6851d25bed50ea175efb74b4f453cf4,f4b8e4e17a868bcdfdc85f906e2f52b0f22b1991..cddc1b98d2aeb22dbbad3dda33066863c33c4a65
@@@ -1074,8 -1343,7 +1074,12 @@@ static void s3c24xx_serial_set_termios(
        wr_regl(port, S3C2410_ULCON, ulcon);
        wr_regl(port, S3C2410_UBRDIV, quot);
  
++<<<<<<< HEAD
 +      if (ourport->info->has_divslot)
 +              wr_regl(port, S3C2443_DIVSLOT, udivslot);
++=======
+       port->status &= ~UPSTAT_AUTOCTS;
++>>>>>>> android-4.14-p
  
        umcon = rd_regl(port, S3C2410_UMCON);
        if (termios->c_cflag & CRTSCTS) {
Simple merge
Simple merge
Simple merge
diff --cc drivers/usb/dwc3/core.c
index bc045ed0fa685ec1d35b04d81209e98507ee02ee,783d16a5346611e1945cc321d3eb832a27cc32b7..7bb13ebbdfa8603a3b43d5678013284d3cf211ca
@@@ -691,10 -535,9 +707,9 @@@ static int dwc3_core_ulpi_init(struct d
   * initialized. The PHY interfaces and the PHYs get initialized together with
   * the core in dwc3_core_init.
   */
 -static int dwc3_phy_setup(struct dwc3 *dwc)
 +int dwc3_phy_setup(struct dwc3 *dwc)
  {
        u32 reg;
-       int ret;
  
        reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
  
@@@ -971,34 -796,28 +984,54 @@@ int dwc3_core_init(struct dwc3 *dwc
                        dwc->maximum_speed = USB_SPEED_HIGH;
        }
  
++<<<<<<< HEAD
 +      ret = dwc3_core_get_phy(dwc);
 +      if (ret) {
 +              dev_err(dwc->dev, "Can't get PHY structure!!!\n");
++=======
+       ret = dwc3_phy_setup(dwc);
+       if (ret)
++>>>>>>> android-4.14-p
                goto err0;
 +      }
 +
 +      /* Adjust SOF accuracy only for revisions >= 2.50a */
 +      if (dwc->revision < DWC3_REVISION_250A)
 +              dwc->adj_sof_accuracy = 0;
  
++<<<<<<< HEAD
 +      ret = dwc3_core_soft_reset(dwc);
 +      if (ret) {
 +              dev_err(dwc->dev, "Can't core_soft_reset!!!(%d)\n", ret);
 +              goto err0;
++=======
+       if (!dwc->ulpi_ready) {
+               ret = dwc3_core_ulpi_init(dwc);
+               if (ret)
+                       goto err0;
+               dwc->ulpi_ready = true;
++>>>>>>> android-4.14-p
        }
  
-       ret = dwc3_phy_setup(dwc);
+       if (!dwc->phys_ready) {
+               ret = dwc3_core_get_phy(dwc);
+               if (ret)
+                       goto err0a;
+               dwc->phys_ready = true;
+       }
+       ret = dwc3_core_soft_reset(dwc);
        if (ret)
-               goto err0;
+               goto err0a;
  
 +      if (dotg) {
 +              phy_tune(dwc->usb2_generic_phy, dotg->otg.state);
 +              phy_tune(dwc->usb3_generic_phy, dotg->otg.state);
 +      } else {
 +              phy_tune(dwc->usb2_generic_phy, 0);
 +              phy_tune(dwc->usb3_generic_phy, 0);
 +      }
 +
        dwc3_core_setup_global_control(dwc);
        dwc3_core_num_eps(dwc);
  
@@@ -1083,10 -889,9 +1116,16 @@@ err1
        phy_exit(dwc->usb2_generic_phy);
        phy_exit(dwc->usb3_generic_phy);
  
++<<<<<<< HEAD
 +      phy_power_off(dwc->usb2_generic_phy);
 +      phy_power_off(dwc->usb3_generic_phy);
 +
 +      dwc->link_state = DWC3_LINK_STATE_SS_DIS;
++=======
+ err0a:
+       dwc3_ulpi_exit(dwc);
++>>>>>>> android-4.14-p
  err0:
        return ret;
  }
Simple merge
diff --cc drivers/usb/dwc3/gadget.c
index 62a02c7689c0534a18a067d3b269bdb579ba7291,1b99d44e52b9ac61760aa56d97b6460680861cda..1f6ccff39ed83dcd1aded5b1eeb56fc152586e36
@@@ -207,10 -180,10 +207,12 @@@ void dwc3_gadget_del_and_unmap_request(
        struct dwc3                     *dwc = dep->dwc;
  
        req->started = false;
 -      list_del(&req->list);
 +      /* Only delete from the list if the item isn't poisoned. */
 +      if (req->list.next != LIST_POISON1)
 +              list_del(&req->list);
        req->remaining = 0;
+       req->unaligned = false;
+       req->zero = false;
  
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
@@@ -1573,6 -1538,7 +1579,10 @@@ int __dwc3_gadget_ep_set_halt(struct dw
                else
                        dep->flags |= DWC3_EP_STALL;
        } else {
++<<<<<<< HEAD
++=======
++>>>>>>> android-4.14-p
                ret = dwc3_send_clear_stall_ep_cmd(dep);
                if (ret)
                        dev_err(dwc->dev, "failed to clear STALL on %s\n",
@@@ -3734,13 -3286,7 +3745,17 @@@ int dwc3_gadget_init(struct dwc3 *dwc
                goto err4;
        }
  
++<<<<<<< HEAD
 +      if (dwc->dotg) {
 +              ret = otg_set_peripheral(&dwc->dotg->otg, &dwc->gadget);
 +              if (ret) {
 +                      dev_err(dwc->dev, "failed to set otg peripheral\n");
 +                      goto err4;
 +              }
 +      }
++=======
+       dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
++>>>>>>> android-4.14-p
  
        return 0;
  
Simple merge
Simple merge
Simple merge
diff --cc drivers/usb/host/xhci-hub.c
index 8ab5f3bcb4eeee68f94fb77837eeeb8793cdb541,997ff183c9cbba856839c8dc5e169fa51cbc78b2..d5979b7c43036389f29ec4aa98c494a7c2b26c66
@@@ -1516,8 -1481,8 +1516,13 @@@ int xhci_bus_suspend(struct usb_hcd *hc
        __le32 __iomem **port_array;
        struct xhci_bus_state *bus_state;
        unsigned long flags;
++<<<<<<< HEAD
 +      int is_port_connect = 0;
 +      int ret;
++=======
+       u32 portsc_buf[USB_MAXCHILDREN];
+       bool wake_enabled;
++>>>>>>> android-4.14-p
  
        max_ports = xhci_get_ports(hcd, &port_array);
        bus_state = &xhci->bus_state[hcd_index(hcd)];
  
                t1 = readl(port_array[port_index]);
                t2 = xhci_port_state_to_neutral(t1);
+               portsc_buf[port_index] = 0;
  
++<<<<<<< HEAD
 +              if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
 +                      slot_id = xhci_find_slot_id_by_port(hcd, xhci,
 +                                      port_index + 1);
 +                      if (slot_id) {
++=======
+               /* Bail out if a USB3 port has a new device in link training */
+               if ((hcd->speed >= HCD_USB3) &&
+                   (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+                       bus_state->bus_suspended = 0;
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+                       xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
+                       return -EBUSY;
+               }
+               /* suspend ports in U0, or bail out for new connect changes */
+               if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+                       if ((t1 & PORT_CSC) && wake_enabled) {
+                               bus_state->bus_suspended = 0;
++>>>>>>> android-4.14-p
                                spin_unlock_irqrestore(&xhci->lock, flags);
-                               xhci_stop_device(xhci, slot_id, 1);
-                               spin_lock_irqsave(&xhci->lock, flags);
+                               xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
+                               return -EBUSY;
                        }
+                       xhci_dbg(xhci, "port %d not suspended\n", port_index);
                        t2 &= ~PORT_PLS_MASK;
                        t2 |= PORT_LINK_STROBE | XDEV_U3;
                        set_bit(port_index, &bus_state->bus_suspended);
                                t2 |= PORT_WKOC_E | PORT_WKCONN_E;
                                t2 &= ~PORT_WKDISC_E;
                        }
 -
 -                      if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
 -                          (hcd->speed < HCD_USB3)) {
 -                              if (usb_amd_pt_check_port(hcd->self.controller,
 -                                                        port_index))
 -                                      t2 &= ~PORT_WAKE_BITS;
 -                      }
 -              } else
 +              } else {
                        t2 &= ~PORT_WAKE_BITS;
 +              }
  
                t1 = xhci_port_state_to_neutral(t1);
++<<<<<<< HEAD
 +              if (t1 != t2) {
 +                      writel(t2, port_array[port_index]);
 +              }
 +      }
 +
 +      if (is_port_connect && usb_hcd_is_primary_hcd(hcd)) {
 +              xhci_info(xhci, "port is connected, phy vendor set\n");
 +              ret = phy_vendor_set(xhci->main_hcd->phy, 1, 0);
 +              if (ret) {
 +                      xhci_info(xhci, "phy vendor set fail\n");
 +                      spin_unlock_irqrestore(&xhci->lock, flags);
 +                      return ret;
 +              }
++=======
+               if (t1 != t2)
+                       portsc_buf[port_index] = t2;
+       }
+       /* write port settings, stopping and suspending ports if needed */
+       port_index = max_ports;
+       while (port_index--) {
+               if (!portsc_buf[port_index])
+                       continue;
+               if (test_bit(port_index, &bus_state->bus_suspended)) {
+                       int slot_id;
+                       slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+                                                           port_index + 1);
+                       if (slot_id) {
+                               spin_unlock_irqrestore(&xhci->lock, flags);
+                               xhci_stop_device(xhci, slot_id, 1);
+                               spin_lock_irqsave(&xhci->lock, flags);
+                       }
+               }
+               writel(portsc_buf[port_index], port_array[port_index]);
++>>>>>>> android-4.14-p
        }
 +
 +      xhci_info(xhci, "%s 'HC_STATE_SUSPENDED' portcon: %d primary_hcd: %d\n",
 +              __func__, is_port_connect, usb_hcd_is_primary_hcd(hcd));
        hcd->state = HC_STATE_SUSPENDED;
        bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
 +
        spin_unlock_irqrestore(&xhci->lock, flags);
 +
        return 0;
  }
  
diff --cc drivers/usb/host/xhci-plat.c
index 9668114c4f72cbc5f5cfb31880bcaad3daace3a4,108a212294bfc673bba4b2dc5d18ac31fd2899a1..e21779e21133b403aa7f116624dae11d9420d2e5
mode 100755,100644..100755
@@@ -542,49 -332,16 +542,54 @@@ static int xhci_plat_remove(struct plat
        struct usb_hcd  *hcd = platform_get_drvdata(dev);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct clk *clk = xhci->clk;
++<<<<<<< HEAD
 +      int timeout = 0;
 +
 +      dev_info(&dev->dev, "XHCI PLAT REMOVE\n");
 +
 +      usb3_portsc = NULL;
 +      pp_set_delayed = 0;
 +
 +      /*
 +       * A deadlock has occasionally been observed in this function,
 +       * so the wait below for hub_event completion was added.
 +       */
 +      while (xhci->shared_hcd->is_in_hub_event || hcd->is_in_hub_event) {
 +              msleep(10);
 +              timeout += 10;
 +              if (timeout >= XHCI_HUB_EVENT_TIMEOUT) {
 +                      xhci_err(xhci,
 +                              "ERROR: hub_event completion timeout\n");
 +                      break;
 +              }
 +      }
 +      xhci_dbg(xhci, "%s: waited %dmsec", __func__, timeout);
++=======
+       struct usb_hcd *shared_hcd = xhci->shared_hcd;
++>>>>>>> android-4.14-p
  
        xhci->xhc_state |= XHCI_STATE_REMOVING;
 +      xhci->xhci_alloc->offset = 0;
 +
 +      dev_info(&dev->dev, "WAKE UNLOCK\n");
 +      wake_unlock(xhci->wakelock);
 +      wake_lock_destroy(xhci->wakelock);
  
-       usb_remove_hcd(xhci->shared_hcd);
+       usb_remove_hcd(shared_hcd);
+       xhci->shared_hcd = NULL;
        usb_phy_shutdown(hcd->usb_phy);
  
 +      /*
 +       * In usb_remove_hcd, phy_exit is called if phy is not NULL.
 +       * However, when the PHY is turned on and off by runtime PM,
 +       * it should not be exited at this time. So, to prevent the PHY exit,
 +       * the PHY pointer has to be set to NULL.
 +       */
 +      if (parent && hcd->phy)
 +              hcd->phy = NULL;
 +
        usb_remove_hcd(hcd);
-       usb_put_hcd(xhci->shared_hcd);
+       usb_put_hcd(shared_hcd);
  
        if (!IS_ERR(clk))
                clk_disable_unprepare(clk);
Simple merge
Simple merge
index 57fbd6ac1ffeab178d8c6db2800b94c862885317,cbc91536e5127dd5b735bba6b3336e75c9bdc075..82ffda77f390336d247cc33bd5489156c8721979
@@@ -1832,34 -1809,37 +1836,46 @@@ struct xhci_hcd 
   * commands, reset device commands, disable slot commands, and address device
   * commands.
   */
- #define XHCI_EP_LIMIT_QUIRK   (1 << 5)
- #define XHCI_BROKEN_MSI               (1 << 6)
- #define XHCI_RESET_ON_RESUME  (1 << 7)
- #define       XHCI_SW_BW_CHECKING     (1 << 8)
- #define XHCI_AMD_0x96_HOST    (1 << 9)
- #define XHCI_TRUST_TX_LENGTH  (1 << 10)
- #define XHCI_LPM_SUPPORT      (1 << 11)
- #define XHCI_INTEL_HOST               (1 << 12)
- #define XHCI_SPURIOUS_REBOOT  (1 << 13)
- #define XHCI_COMP_MODE_QUIRK  (1 << 14)
- #define XHCI_AVOID_BEI                (1 << 15)
- #define XHCI_PLAT             (1 << 16)
- #define XHCI_SLOW_SUSPEND     (1 << 17)
- #define XHCI_SPURIOUS_WAKEUP  (1 << 18)
+ #define XHCI_EP_LIMIT_QUIRK   BIT_ULL(5)
+ #define XHCI_BROKEN_MSI               BIT_ULL(6)
+ #define XHCI_RESET_ON_RESUME  BIT_ULL(7)
+ #define       XHCI_SW_BW_CHECKING     BIT_ULL(8)
+ #define XHCI_AMD_0x96_HOST    BIT_ULL(9)
+ #define XHCI_TRUST_TX_LENGTH  BIT_ULL(10)
+ #define XHCI_LPM_SUPPORT      BIT_ULL(11)
+ #define XHCI_INTEL_HOST               BIT_ULL(12)
+ #define XHCI_SPURIOUS_REBOOT  BIT_ULL(13)
+ #define XHCI_COMP_MODE_QUIRK  BIT_ULL(14)
+ #define XHCI_AVOID_BEI                BIT_ULL(15)
+ #define XHCI_PLAT             BIT_ULL(16)
+ #define XHCI_SLOW_SUSPEND     BIT_ULL(17)
+ #define XHCI_SPURIOUS_WAKEUP  BIT_ULL(18)
  /* For controllers with a broken beyond repair streams implementation */
- #define XHCI_BROKEN_STREAMS   (1 << 19)
- #define XHCI_PME_STUCK_QUIRK  (1 << 20)
- #define XHCI_MTK_HOST         (1 << 21)
- #define XHCI_SSIC_PORT_UNUSED (1 << 22)
- #define XHCI_NO_64BIT_SUPPORT (1 << 23)
- #define XHCI_MISSING_CAS      (1 << 24)
+ #define XHCI_BROKEN_STREAMS   BIT_ULL(19)
+ #define XHCI_PME_STUCK_QUIRK  BIT_ULL(20)
+ #define XHCI_MTK_HOST         BIT_ULL(21)
+ #define XHCI_SSIC_PORT_UNUSED BIT_ULL(22)
+ #define XHCI_NO_64BIT_SUPPORT BIT_ULL(23)
+ #define XHCI_MISSING_CAS      BIT_ULL(24)
  /* For controller with a broken Port Disable implementation */
++<<<<<<< HEAD
 +#define XHCI_BROKEN_PORT_PED  (1 << 25)
 +#define XHCI_LIMIT_ENDPOINT_INTERVAL_7        (1 << 26)
 +#define XHCI_U2_DISABLE_WAKE  (1 << 27)
 +#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL       (1 << 28)
 +#define XHCI_L2_SUPPORT       (1 << 29)
 +#define XHCI_SUSPEND_DELAY    (1 << 30)
++=======
+ #define XHCI_BROKEN_PORT_PED  BIT_ULL(25)
+ #define XHCI_LIMIT_ENDPOINT_INTERVAL_7        BIT_ULL(26)
+ #define XHCI_U2_DISABLE_WAKE  BIT_ULL(27)
+ #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL       BIT_ULL(28)
+ #define XHCI_HW_LPM_DISABLE   BIT_ULL(29)
+ #define XHCI_SUSPEND_DELAY    BIT_ULL(30)
+ #define XHCI_INTEL_USB_ROLE_SW        BIT_ULL(31)
+ #define XHCI_RESET_PLL_ON_DISCONNECT  BIT_ULL(34)
+ #define XHCI_SNPS_BROKEN_SUSPEND    BIT_ULL(35)
++>>>>>>> android-4.14-p
  
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
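
The android-4.14-p side of this hunk converts the quirk constants from plain (1 << n) to BIT_ULL(n) and introduces bits above 31 (XHCI_RESET_PLL_ON_DISCONNECT is bit 34), which presumes the quirks field itself has been widened to a 64-bit type. A minimal, self-contained userspace sketch of why the 1ULL form is needed; BIT_ULL is written out by hand here to mirror the kernel macro, and the bit numbers are only illustrative:

/*
 * Standalone illustration, not part of the merge: with a plain int
 * constant, 1 << 34 overflows a 32-bit int (undefined behaviour),
 * while 1ULL << 34 keeps the full 64-bit width.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	uint64_t quirks = 0;

	quirks |= BIT_ULL(25);	/* e.g. XHCI_BROKEN_PORT_PED         */
	quirks |= BIT_ULL(34);	/* e.g. XHCI_RESET_PLL_ON_DISCONNECT */

	printf("bit 25 set: %d, bit 34 set: %d\n",
	       !!(quirks & BIT_ULL(25)), !!(quirks & BIT_ULL(34)));
	return 0;
}
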
Simple merge
diff --cc fs/buffer.c
Simple merge
Simple merge
Simple merge
diff --cc fs/direct-io.c
Simple merge
diff --cc fs/ext4/ext4.h
Simple merge
diff --cc fs/ext4/inode.c
Simple merge
diff --cc fs/f2fs/data.c
index 513e24024a75b58b5dfad85335c4b5bd2f2cf7ff,9bb57d2dccd0b51541e89bf5066d1f3742ecbb23..abf70989e99f14d4452af496561161bc95f7da73
@@@ -495,9 -439,11 +495,12 @@@ int f2fs_submit_page_bio(struct f2fs_io
        struct bio *bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;
 +      struct inode *inode = fio->page->mapping->host;
  
-       verify_block_addr(fio, fio->new_blkaddr);
+       if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+                       __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+               return -EFAULT;
        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);
  
                bio_put(bio);
                return -EFAULT;
        }
 +      fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
        bio_set_op_attrs(bio, fio->op, fio->op_flags);
  
++<<<<<<< HEAD
 +      if (f2fs_may_encrypt_bio(inode, fio))
 +              fscrypt_set_bio(inode, bio, PG_DUN(inode, fio->page));
 +
 +      __submit_bio(fio->sbi, bio, fio->type);
 +
++=======
++>>>>>>> android-4.14-p
        if (!is_read_io(fio->op))
                inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
+       __submit_bio(fio->sbi, bio, fio->type);
        return 0;
  }
  
diff --cc fs/f2fs/f2fs.h
Simple merge
diff --cc fs/pnode.c
index 681916df422c777a8743608623258d1ffecbe501,56f9a28a688bc150be25d73417f64b8b4b1e4fb9..aa83a34e08d68c1541a42fe3e465b7f5ca4b5766
@@@ -608,18 -608,36 +608,41 @@@ int propagate_umount(struct list_head *
        return 0;
  }
  
++<<<<<<< HEAD
++=======
+ /*
+  *  Iterates over all slaves, and slaves of slaves.
+  */
+ static struct mount *next_descendent(struct mount *root, struct mount *cur)
+ {
+       if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
+               return first_slave(cur);
+       do {
+               struct mount *master = cur->mnt_master;
+               if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
+                       struct mount *next = next_slave(cur);
+                       return (next == root) ? NULL : next;
+               }
+               cur = master;
+       } while (cur != root);
+       return NULL;
+ }
++>>>>>>> android-4.14-p
  void propagate_remount(struct mount *mnt)
  {
 -      struct mount *m = mnt;
 +      struct mount *parent = mnt->mnt_parent;
 +      struct mount *p = mnt, *m;
        struct super_block *sb = mnt->mnt.mnt_sb;
  
 -      if (sb->s_op->copy_mnt_data) {
 -              m = next_descendent(mnt, m);
 -              while (m) {
 +      if (!sb->s_op->copy_mnt_data)
 +              return;
 +      for (p = propagation_next(parent, parent); p;
 +                              p = propagation_next(p, parent)) {
 +              m = __lookup_mnt(&p->mnt, mnt->mnt_mountpoint);
 +              if (m)
                        sb->s_op->copy_mnt_data(m->mnt.data, mnt->mnt.data);
 -                      m = next_descendent(mnt, m);
 -              }
        }
  }
Simple merge
Simple merge
index ca91a885a6f85e098c31a49a2bedf8efdb0a54ce,6c5b4d73713e167bd08b97a5a4e44df7d9b85335..f08ff252cc4540c0e5acb68d5db55d98150efdaf
  #define __nocfi               __attribute__((no_sanitize("cfi")))
  #endif
  
++<<<<<<< HEAD
 +/* all clang versions usable with the kernel support KASAN ABI version 5 */
 +#define KASAN_ABI_VERSION 5
 +
 +/* emulate gcc's __SANITIZE_ADDRESS__ flag */
 +#if __has_feature(address_sanitizer)
 +#define __SANITIZE_ADDRESS__
++=======
+ /*
+  * Not all versions of clang implement the type-generic versions
+  * of the builtin overflow checkers. Fortunately, clang implements
+  * __has_builtin allowing us to avoid awkward version
+  * checks. Unfortunately, we don't know which version of gcc clang
+  * pretends to be, so the macro may or may not be defined.
+  */
+ #undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+ #if __has_builtin(__builtin_mul_overflow) && \
+     __has_builtin(__builtin_add_overflow) && \
+     __has_builtin(__builtin_sub_overflow)
+ #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
++>>>>>>> android-4.14-p
  #endif
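
The comment above relies on clang's __has_builtin() to detect the type-generic overflow helpers rather than guessing from the GCC version clang advertises. A small self-contained sketch of the same gating pattern; the fallback helper and its name are illustrative, not taken from the kernel:

/* Illustration only: guard use of __builtin_add_overflow behind
 * __has_builtin, with a hand-rolled fallback when it is missing. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#ifndef __has_builtin
#define __has_builtin(x) 0	/* compilers without __has_builtin */
#endif

#if __has_builtin(__builtin_add_overflow)
static bool add_overflows(int a, int b)
{
	int sum;

	return __builtin_add_overflow(a, b, &sum);
}
#else
static bool add_overflows(int a, int b)
{
	return (b > 0 && a > INT_MAX - b) || (b < 0 && a < INT_MIN - b);
}
#endif

int main(void)
{
	printf("%d %d\n", add_overflows(INT_MAX, 1), add_overflows(1, 2));
	return 0;
}
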
Simple merge
index da6550de16353e11e0056ca629840716e644a4be,67f4e960880ea909b48c77bf84741c77b2f73121..1a4035b1be977e5d0f8a6ea24b04f3fb4ab2e15d
@@@ -259,16 -254,8 +259,19 @@@ __ATTR(_name, 0644, show_##_name, store
  static struct freq_attr _name =                       \
  __ATTR(_name, 0200, NULL, store_##_name)
  
++<<<<<<< HEAD
 +struct global_attr {
 +      struct attribute attr;
 +      ssize_t (*show)(struct kobject *kobj,
 +                      struct kobj_attribute *attr, char *buf);
 +      ssize_t (*store)(struct kobject *a, struct kobj_attribute *b,
 +                       const char *c, size_t count);
 +};
 +
++=======
++>>>>>>> android-4.14-p
  #define define_one_global_ro(_name)           \
- static struct global_attr _name =             \
+ static struct kobj_attribute _name =          \
  __ATTR(_name, 0444, show_##_name, NULL)
  
  #define define_one_global_rw(_name)           \
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 6cb29e983d66dd6f0cc6d08e1665d6762b592d55,44b54d6488387eee5b1dc369891c97c5074f21b8..5b567b24d4a3163da9d8415fc60aab759d2c43ee
@@@ -414,19 -414,6 +414,22 @@@ struct sched_avg 
        unsigned long                   load_avg;
        unsigned long                   util_avg;
        struct util_est                 util_est;
++<<<<<<< HEAD
 +};
 +
 +struct ontime_avg {
 +      u64 ontime_migration_time;
 +      u64 load_sum;
 +      u32 period_contrib;
 +      unsigned long load_avg;
 +};
 +
 +struct ontime_entity {
 +      struct ontime_avg avg;
 +      int migrating;
 +      int cpu;
++=======
++>>>>>>> android-4.14-p
  };
  
  struct sched_statistics {
@@@ -1527,8 -1492,8 +1530,13 @@@ static inline bool is_percpu_thread(voi
  #define PFA_SPREAD_SLAB                       2       /* Spread some slab caches over cpuset */
  #define PFA_SPEC_SSB_DISABLE          3       /* Speculative Store Bypass disabled */
  #define PFA_SPEC_SSB_FORCE_DISABLE    4       /* Speculative Store Bypass force disabled*/
++<<<<<<< HEAD
 +#define PFA_LMK_WAITING                       3       /* Lowmemorykiller is waiting */
 +
++=======
+ #define PFA_SPEC_IB_DISABLE           5       /* Indirect branch speculation restricted */
+ #define PFA_SPEC_IB_FORCE_DISABLE     6       /* Indirect branch speculation permanently restricted */
++>>>>>>> android-4.14-p
  
  #define TASK_PFA_TEST(name, func)                                     \
        static inline bool task_##func(struct task_struct *p)           \
@@@ -1560,8 -1525,12 +1568,17 @@@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_s
  TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
  TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
  
++<<<<<<< HEAD
 +TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
 +TASK_PFA_SET(LMK_WAITING, lmk_waiting)
++=======
+ TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+ TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+ TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+ TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+ TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
++>>>>>>> android-4.14-p
  
  static inline void
  current_restore_flags(unsigned long orig_flags, unsigned long flags)
index 5736801c209b184b88455a017b88867455c56f3d,1b722616d6bd72cde3173b21e175f5e314e9933e..063967bf1ca8c9758a413178a205b168ad75efdd
@@@ -46,8 -46,6 +46,11 @@@ extern unsigned int sysctl_numa_balanci
  extern unsigned int sysctl_numa_balancing_scan_size;
  
  #ifdef CONFIG_SCHED_DEBUG
++<<<<<<< HEAD
 +# include <linux/static_key.h>
 +
++=======
++>>>>>>> android-4.14-p
  extern __read_mostly unsigned int sysctl_sched_migration_cost;
  extern __read_mostly unsigned int sysctl_sched_nr_migrate;
  extern __read_mostly unsigned int sysctl_sched_time_avg;
Simple merge
index 21a760475dc11151287b5a610b25b9987c24e52c,bd8219943a91439192e275f0242b8168db59256d..737bf94ef3c6765984aa7eaa18ba33f00b41bc88
@@@ -117,8 -120,8 +120,13 @@@ extern void tick_nohz_idle_restart_tick
  extern void tick_nohz_idle_enter(void);
  extern void tick_nohz_idle_exit(void);
  extern void tick_nohz_irq_exit(void);
++<<<<<<< HEAD
 +extern ktime_t tick_nohz_get_sleep_length(void);
 +extern ktime_t tick_nohz_get_sleep_length_cpu(int cpu);
++=======
+ extern bool tick_nohz_idle_got_tick(void);
+ extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
++>>>>>>> android-4.14-p
  extern unsigned long tick_nohz_get_idle_calls(void);
  extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
  extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
@@@ -126,19 -137,22 +142,26 @@@ static inline void tick_nohz_idle_stop_
  #else /* !CONFIG_NO_HZ_COMMON */
  #define tick_nohz_enabled (0)
  static inline int tick_nohz_tick_stopped(void) { return 0; }
+ static inline void tick_nohz_idle_stop_tick(void) { }
+ static inline void tick_nohz_idle_retain_tick(void) { }
+ static inline void tick_nohz_idle_restart_tick(void) { }
  static inline void tick_nohz_idle_enter(void) { }
  static inline void tick_nohz_idle_exit(void) { }
+ static inline bool tick_nohz_idle_got_tick(void) { return false; }
  
- static inline ktime_t tick_nohz_get_sleep_length(void)
+ static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
  {
-       return NSEC_PER_SEC / HZ;
+       *delta_next = TICK_NSEC;
+       return *delta_next;
  }
 +static inline ktime_t tick_nohz_get_sleep_length_cpu(int cpu)
 +{
 +      return NSEC_PER_SEC / HZ;
 +}
  static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
  static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
+ static inline void tick_nohz_idle_stop_tick_protected(void) { }
  #endif /* !CONFIG_NO_HZ_COMMON */
  
  #ifdef CONFIG_NO_HZ_FULL
Simple merge
Simple merge
Simple merge
index 6f44f7fd52a90311fd3d1ece2dc18e7642629b3d,de8069c8d0317a4a5704635c9e69d8b4690d9af7..32b139d401b47a9b5021cee0a498949e7795d123
@@@ -639,113 -649,19 +649,126 @@@ struct cfs_rq *__trace_sched_group_cfs_
  }
  #endif /* CREATE_TRACE_POINTS */
  
+ #ifdef CONFIG_SCHED_WALT
+ extern unsigned int sysctl_sched_use_walt_cpu_util;
+ extern unsigned int sysctl_sched_use_walt_task_util;
+ extern unsigned int walt_ravg_window;
+ extern bool walt_disabled;
+ #define walt_util(util_var, demand_sum) {\
+       u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
+       do_div(sum, walt_ravg_window);\
+       util_var = (typeof(util_var))sum;\
+       }
+ #endif
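
The walt_util() macro above scales a WALT demand sum (tracked in nanoseconds over a window of walt_ravg_window ns) into the 0..SCHED_CAPACITY_SCALE range. A standalone arithmetic sketch with illustrative numbers; the 20 ms window and 10 ms demand are assumptions, not values taken from this merge:

/* Illustration only: a demand of 10 ms inside a 20 ms WALT window maps
 * to util 512, i.e. half of SCHED_CAPACITY_SCALE (1024). */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10

int main(void)
{
	uint64_t walt_ravg_window = 20000000ULL;	/* 20 ms, in ns   */
	uint64_t demand_sum = 10000000ULL;		/* demand, in ns  */
	uint64_t util = (demand_sum << SCHED_CAPACITY_SHIFT) / walt_ravg_window;

	printf("util = %llu / 1024\n", (unsigned long long)util);
	return 0;
}
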
 +/*
 + * Tracepoint for logging FRT schedule activity
 + */
 +
 +TRACE_EVENT(sched_fluid_activated_cpus,
 +
 +      TP_PROTO(int cpu, int util_sum, int busy_thr, unsigned int prefer_mask),
 +
 +      TP_ARGS(cpu, util_sum, busy_thr, prefer_mask),
 +
 +      TP_STRUCT__entry(
 +              __field(        int,            cpu             )
 +              __field(        int,            util_sum        )
 +              __field(        int,            busy_thr        )
 +              __field(        unsigned long,  prefer_mask     )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->cpu            = cpu;
 +              __entry->util_sum       = util_sum;
 +              __entry->busy_thr       = busy_thr;
 +              __entry->prefer_mask    = prefer_mask;
 +      ),
 +
 +      TP_printk("cpu=%d util_sum=%d busy_thr=%d prefer_mask=%x",
 +              __entry->cpu,__entry->util_sum,
 +              __entry->busy_thr, __entry->prefer_mask)
 +);
 +
 +TRACE_EVENT(sched_fluid_stat,
 +
 +      TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, int best, char* str),
 +
 +      TP_ARGS(tsk, avg, best, str),
 +
 +      TP_STRUCT__entry(
 +              __array( char,  selectby,       TASK_COMM_LEN   )
 +              __array( char,  targettsk,      TASK_COMM_LEN   )
 +              __field( pid_t, pid                             )
 +              __field( int,   bestcpu                         )
 +              __field( int,   prevcpu                         )
 +              __field( unsigned long, load_avg                )
 +              __field( unsigned long, util_avg                )
 +      ),
 +
 +      TP_fast_assign(
 +              memcpy(__entry->selectby, str, TASK_COMM_LEN);
 +              memcpy(__entry->targettsk, tsk->comm, TASK_COMM_LEN);
 +              __entry->pid                    = tsk->pid;
 +              __entry->bestcpu                = best;
 +              __entry->prevcpu                = task_cpu(tsk);
 +              __entry->load_avg               = avg->load_avg;
 +              __entry->util_avg               = avg->util_avg;
 +      ),
 +      TP_printk("frt: comm=%s pid=%d assigned to #%d from #%d load_avg=%lu util_avg=%lu "
 +                      "by %s.",
 +                __entry->targettsk,
 +                __entry->pid,
 +                __entry->bestcpu,
 +                __entry->prevcpu,
 +                __entry->load_avg,
 +                __entry->util_avg,
 +                __entry->selectby)
 +);
 +/*
 + * Tracepoint for accounting sched averages for tasks.
 + */
 +TRACE_EVENT(sched_rt_load_avg_task,
 +
 +      TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
 +
 +      TP_ARGS(tsk, avg),
 +
 +      TP_STRUCT__entry(
 +              __array( char,  comm,   TASK_COMM_LEN           )
 +              __field( pid_t, pid                             )
 +              __field( int,   cpu                             )
 +              __field( unsigned long, load_avg                )
 +              __field( unsigned long, util_avg                )
 +              __field( u64,           load_sum                )
 +              __field( u32,           util_sum                )
 +              __field( u32,           period_contrib          )
 +      ),
 +
 +      TP_fast_assign(
 +              memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
 +              __entry->pid                    = tsk->pid;
 +              __entry->cpu                    = task_cpu(tsk);
 +              __entry->load_avg               = avg->load_avg;
 +              __entry->util_avg               = avg->util_avg;
 +              __entry->load_sum               = avg->load_sum;
 +              __entry->util_sum               = avg->util_sum;
 +              __entry->period_contrib         = avg->period_contrib;
 +      ),
 +      TP_printk("rt: comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
 +                      "load_sum=%llu util_sum=%u period_contrib=%u",
 +                __entry->comm,
 +                __entry->pid,
 +                __entry->cpu,
 +                __entry->load_avg,
 +                __entry->util_avg,
 +                (u64)__entry->load_sum,
 +                (u32)__entry->util_sum,
 +                (u32)__entry->period_contrib)
 +);
 +
 +
  /*
   * Tracepoint for cfs_rq load tracking:
   */
@@@ -1052,73 -869,13 +1040,74 @@@ TRACE_EVENT(sched_tune_tasks_update
        ),
  
        TP_printk("pid=%d comm=%s "
-                       "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
+                       "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d timeout=%llu",
                __entry->pid, __entry->comm,
                __entry->cpu, __entry->tasks, __entry->idx,
-               __entry->boost, __entry->max_boost)
+               __entry->boost, __entry->max_boost,
+               __entry->group_ts)
  );
  
 +/*
 + * Tracepoint for schedtune_grouputil_update
 + */
 +TRACE_EVENT(sched_tune_grouputil_update,
 +
 +      TP_PROTO(int idx, int total, int accumulated, unsigned long group_util,
 +                      struct task_struct *heaviest_p, unsigned long biggest_util),
 +
 +      TP_ARGS(idx, total, accumulated, group_util, heaviest_p, biggest_util),
 +
 +      TP_STRUCT__entry(
 +              __field( int,           idx             )
 +              __field( int,           total           )
 +              __field( int,           accumulated     )
 +              __field( unsigned long, group_util      )
 +              __field( pid_t,         pid             )
 +              __array( char,  comm,   TASK_COMM_LEN   )
 +              __field( unsigned long, biggest_util    )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->idx            = idx;
 +              __entry->total          = total;
 +              __entry->accumulated    = accumulated;
 +              __entry->group_util     = group_util;
 +              __entry->pid            = heaviest_p->pid;
 +              memcpy(__entry->comm, heaviest_p->comm, TASK_COMM_LEN);
 +              __entry->biggest_util   = biggest_util;
 +      ),
 +
 +      TP_printk("idx=%d total=%d accumulated=%d group_util=%lu "
 +                      "heaviest task(pid=%d comm=%s util=%lu)",
 +              __entry->idx, __entry->total, __entry->accumulated, __entry->group_util,
 +              __entry->pid, __entry->comm, __entry->biggest_util)
 +);
 +
 +/*
 + * Tracepoint for checking group balancing
 + */
 +TRACE_EVENT(sched_tune_check_group_balance,
 +
 +      TP_PROTO(int idx, int ib_count, bool balancing),
 +
 +      TP_ARGS(idx, ib_count, balancing),
 +
 +      TP_STRUCT__entry(
 +              __field( int,           idx             )
 +              __field( int,           ib_count        )
 +              __field( bool,          balancing       )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->idx            = idx;
 +              __entry->ib_count       = ib_count;
 +              __entry->balancing      = balancing;
 +      ),
 +
 +      TP_printk("idx=%d imbalance_count=%d balancing=%d",
 +              __entry->idx, __entry->ib_count, __entry->balancing)
 +);
 +
  /*
   * Tracepoint for schedtune_boostgroup_update
   */
diff --cc init/Kconfig
index 266d7da167927b77ef3615e6e63f4fcddf3e96be,37ecab2cba99131b2165aa84a3c978072fb5195b..0c0aa1996e6ee7b2e1add6f1d9aa235eac7da39b
@@@ -599,7 -599,7 +599,11 @@@ menu "FAIR Scheuler tunables
  
  choice
        prompt "Utilization's PELT half-Life"
++<<<<<<< HEAD
 +      default PELT_UTIL_HALFLIFE_16
++=======
+       default PELT_UTIL_HALFLIFE_32
++>>>>>>> android-4.14-p
        help
          Allows choosing one of the possible values for the PELT half-life to
          be used for the update of the utilization of tasks and CPUs.
          build up to 50% utilization. The higher the half-life, the longer it
          takes for a task to be represented as a big one.
  
++<<<<<<< HEAD
 +        If not sure, use the default of 16 ms.
 +
 +config PELT_UTIL_HALFLIFE_32
 +      bool "32 ms, for server"
 +
 +config PELT_UTIL_HALFLIFE_16
 +      bool "16 ms, suggested for interactive workloads"
 +
 +config PELT_UTIL_HALFLIFE_8
 +      bool "8 ms, very fast"
++=======
+         If not sure, use the default of 32 ms.
+ config PELT_UTIL_HALFLIFE_32
+       bool "32 ms, default for server"
+ config PELT_UTIL_HALFLIFE_16
+       bool "16 ms, suggested for interactive workloads"
+       help
+         Use 16ms as the PELT half-life value. This makes the ramp-up and
+         decay of utilization and load twice as fast as with the default
+         configuration using 32ms.
+ config PELT_UTIL_HALFLIFE_8
+       bool "8 ms, very fast"
+       help
+         Use 8ms as the PELT half-life value. This makes the ramp-up and
+         decay of utilization and load four times as fast as with the default
+         configuration using 32ms.
++>>>>>>> android-4.14-p
  
  endchoice
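
The half-life choice above sets how quickly the PELT signals converge: with the usual PELT accounting period of roughly 1 ms (1024 us, an assumption not spelled out in this Kconfig), a half-life of N ms corresponds to a per-period decay factor y with y^N = 0.5. A small sketch computing that factor for the three options:

/* Illustration: y = 2^(-1/halflife); ~0.9786 for 32 ms, ~0.9576 for
 * 16 ms and ~0.9170 for 8 ms, so shorter half-lives ramp and decay
 * utilization proportionally faster.  Build with: cc pelt_y.c -lm */
#include <math.h>
#include <stdio.h>

int main(void)
{
	const int halflife_ms[] = { 32, 16, 8 };

	for (int i = 0; i < 3; i++)
		printf("half-life %2d ms: y = %.4f\n",
		       halflife_ms[i], pow(0.5, 1.0 / halflife_ms[i]));
	return 0;
}
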
  
diff --cc kernel/cpu.c
index cb40d66e714ee532f5cbf381070831bd8633ef60,21f0be188073cea4aa6a8cd40af7b11f24eb4516..e737ba6d6178de39dd963907210b41eededea1fb
@@@ -375,9 -309,21 +377,22 @@@ void cpus_write_lock(void
  
  void cpus_write_unlock(void)
  {
 -      percpu_up_write(&cpu_hotplug_lock);
 -}
 -
 -void lockdep_assert_cpus_held(void)
 -{
++<<<<<<< HEAD
 +      cpu_hotplug.active_writer = NULL;
 +      mutex_unlock(&cpu_hotplug.lock);
 +      cpuhp_lock_release();
++=======
+       /*
+        * We can't have hotplug operations before userspace starts running,
+        * and some init codepaths will knowingly not take the hotplug lock.
+        * This is all valid, so mute lockdep until it makes sense to report
+        * unheld locks.
+        */
+       if (system_state < SYSTEM_RUNNING)
+               return;
+       percpu_rwsem_assert_held(&cpu_hotplug_lock);
++>>>>>>> android-4.14-p
  }
  
  /*
@@@ -594,10 -628,7 +698,14 @@@ static void cpuhp_thread_fun(unsigned i
         */
        smp_mb();
  
++<<<<<<< HEAD
 +      if (WARN_ON_ONCE(!st->should_run))
 +              return;
 +
 +      lock_map_acquire(&cpuhp_state_lock_map);
++=======
+       cpuhp_lock_acquire(bringup);
++>>>>>>> android-4.14-p
  
        if (st->single) {
                state = st->cb_state;
diff --cc kernel/exit.c
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc kernel/panic.c
Simple merge
index 69bd1abbc9dc19afc24b2904a6ebb63ee59a8e39,2e2c86dd226fea1d5d0c9a27a23318fd6d82fe06..54ad827d2c1b1b7b336e299512b2ec05bae4e3e1
@@@ -1778,22 -1834,7 +1924,26 @@@ int vprintk_store(int facility, int lev
        char *text = textbuf;
        size_t text_len;
        enum log_flags lflags = 0;
++<<<<<<< HEAD
 +      unsigned long flags;
 +      int this_cpu;
 +      int printed_len;
 +      bool in_sched = false;
 +
 +      if (level == LOGLEVEL_SCHED) {
 +              level = LOGLEVEL_DEFAULT;
 +              in_sched = true;
 +      }
 +
 +      boot_delay_msec(level);
 +      printk_delay();
 +
 +      /* This stops the holder of console_sem just where we want him */
 +      logbuf_lock_irqsave(flags);
 +      this_cpu = smp_processor_id();
++=======
++>>>>>>> android-4.14-p
        /*
         * The printf needs to come first; we need the syslog
         * prefix which might be passed-in as a parameter.
Simple merge
Simple merge
index a8a4eecfc951ed2e25860881acd01a415404c413,6cbd2b9d574832f578a45a96b4f025e060cd00c3..951b6978bf4901a08b2f74185c56c420f584155f
  #include <trace/events/power.h>
  
  #include "sched.h"
 +#include "tune.h"
  
- unsigned long boosted_cpu_util(int cpu);
+ unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
  
 -#define SUGOV_KTHREAD_PRIORITY        50
 -
  struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int up_rate_limit_us;
@@@ -664,9 -680,11 +664,15 @@@ fail
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);
  
++<<<<<<< HEAD
 +free_sg_policy:
++=======
+ stop_kthread:
+       sugov_kthread_stop(sg_policy);
++>>>>>>> android-4.14-p
        mutex_unlock(&global_tunables_lock);
  
+ free_sg_policy:
        sugov_policy_free(sg_policy);
  
  disable_fast_switch:
index 0cae58fbb6d4f6aab114cb6c01c14d4c1cf6bbc2,f6c02005bd92e5dc6f0bcbc4490969578aa5670b..035e36359afd1aa1898f0fef3ce8e21c1a78c9ad
@@@ -796,13 -792,9 +797,14 @@@ void post_init_entity_util_avg(struct s
  {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        struct sched_avg *sa = &se->avg;
-       long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+       long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+       long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
  
 +      if (sched_feat(EXYNOS_MS)) {
 +              exynos_init_entity_util_avg(se);
 +              goto util_init_done;
 +      }
 +
        if (cap > 0) {
                if (cfs_rq->avg.util_avg != 0) {
                        sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
@@@ -3120,10 -3092,30 +3145,37 @@@ static in
  __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
  {
        if (___update_load_avg(now, cpu, &se->avg,
++<<<<<<< HEAD
 +                                se->on_rq * scale_load_down(se->load.weight),
 +                                cfs_rq->curr == se, NULL, NULL)) {
 +              if (schedtune_util_est_en(task_of(se)))
 +                      cfs_se_util_change(&se->avg);
++=======
+                              se->on_rq * scale_load_down(se->load.weight),
+                              cfs_rq->curr == se, NULL, NULL)) {
+               cfs_se_util_change(&se->avg);
+ #ifdef UTIL_EST_DEBUG
+               /*
+                * Trace utilization only for actual tasks.
+                *
+                * These trace events are mostly useful to get easier to
+                * read plots for the estimated utilization, where we can
+                * compare it with the actual grow/decrease of the original
+                * PELT signal.
+                * Let's keep them disabled by default in "production kernels".
+                */
+               if (entity_is_task(se)) {
+                       struct task_struct *tsk = task_of(se);
+                       trace_sched_util_est_task(tsk, &se->avg);
+                       /* Trace utilization only for top level CFS RQ */
+                       cfs_rq = &(task_rq(tsk)->cfs);
+                       trace_sched_util_est_cpu(cpu, cfs_rq);
+               }
+ #endif /* UTIL_EST_DEBUG */
++>>>>>>> android-4.14-p
  
                return 1;
        }
@@@ -3652,28 -3650,51 +3704,75 @@@ static inline unsigned long cfs_rq_load
  
  static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
  
++<<<<<<< HEAD
 +static inline unsigned long task_util(struct task_struct *p)
 +{
 +#ifdef CONFIG_SCHED_WALT
 +      if (!walt_disabled && sysctl_sched_use_walt_task_util) {
 +              return (p->ravg.demand / (walt_ravg_window >> SCHED_CAPACITY_SHIFT));
 +      }
++=======
+ static inline int task_fits_capacity(struct task_struct *p, long capacity);
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ {
+       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               return;
+       if (!p) {
+               rq->misfit_task_load = 0;
+               return;
+       }
+       if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
+               rq->misfit_task_load = 0;
+               return;
+       }
+       rq->misfit_task_load = task_h_load(p);
+ }
+ static inline unsigned long task_util(struct task_struct *p)
+ {
+ #ifdef CONFIG_SCHED_WALT
+       if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
+               return (p->ravg.demand /
+                       (walt_ravg_window >> SCHED_CAPACITY_SHIFT));
++>>>>>>> android-4.14-p
  #endif
        return READ_ONCE(p->se.avg.util_avg);
  }
  
++<<<<<<< HEAD
 +inline unsigned long _task_util_est(struct task_struct *p)
 +{
 +      struct util_est ue = READ_ONCE(p->se.avg.util_est);
 +
 +      return schedtune_util_est_en(p) ? max(ue.ewma, ue.enqueued)
 +                                      : task_util(p);
 +}
 +
 +inline unsigned long task_util_est(struct task_struct *p)
 +{
 +      return schedtune_util_est_en(p) ? max(READ_ONCE(p->se.avg.util_avg), _task_util_est(p))
 +                                      : task_util(p);
++=======
+ static inline unsigned long _task_util_est(struct task_struct *p)
+ {
+       struct util_est ue = READ_ONCE(p->se.avg.util_est);
+       return max(ue.ewma, ue.enqueued);
+ }
+ static inline unsigned long task_util_est(struct task_struct *p)
+ {
+ #ifdef CONFIG_SCHED_WALT
+       if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
+               return (p->ravg.demand /
+                       (walt_ravg_window >> SCHED_CAPACITY_SHIFT));
+ #endif
+       return max(task_util(p), _task_util_est(p));
++>>>>>>> android-4.14-p
  }
  
  static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
        enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
  
++<<<<<<< HEAD
 +      /* Update plots for Task and CPU estimated utilization */
++=======
++>>>>>>> android-4.14-p
        trace_sched_util_est_task(p, &p->se.avg);
        trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
  }
  /*
   * Check if a (signed) value is within a specified (unsigned) margin,
   * based on the observation that:
++<<<<<<< HEAD
++=======
+  *
++>>>>>>> android-4.14-p
   *     abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
   *
   * NOTE: this only works when value + margin < INT_MAX.
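
The helper this comment documents is elided between hunks. Below is a self-contained sketch of the trick it describes, in which a single unsigned comparison implements abs(value) < margin for a signed value (valid while value + margin stays below INT_MAX); it is reproduced here only as an illustration of the quoted observation:

/* Sketch following the comment above, not copied from this merge. */
#include <stdbool.h>
#include <stdio.h>

static inline bool within_margin(int value, int margin)
{
	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}

int main(void)
{
	printf("%d %d %d\n",
	       within_margin(3, 5),	/* |3|  < 5 -> 1 */
	       within_margin(-4, 5),	/* |-4| < 5 -> 1 */
	       within_margin(7, 5));	/* |7|  < 5 -> 0 */
	return 0;
}
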
@@@ -3729,7 -3750,6 +3835,10 @@@ util_est_dequeue(struct cfs_rq *cfs_rq
        }
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
  
++<<<<<<< HEAD
 +      /* Update plots for CPU's estimated utilization */
++=======
++>>>>>>> android-4.14-p
        trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
  
        /*
        if (!task_sleep)
                return;
  
++<<<<<<< HEAD
 +      if (!schedtune_util_est_en(p))
 +              return;
 +
++=======
++>>>>>>> android-4.14-p
        /*
         * If the PELT values haven't changed since enqueue time,
         * skip the util_est update.
         */
        ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
        last_ewma_diff = ue.enqueued - ue.ewma;
++<<<<<<< HEAD
 +      if (within_margin(last_ewma_diff, capacity_orig_of(task_cpu(p)) / 100))
++=======
+       if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
++>>>>>>> android-4.14-p
                return;
  
        /*
        ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
        WRITE_ONCE(p->se.avg.util_est, ue);
  
++<<<<<<< HEAD
 +      /* Update plots for Task's estimated utilization */
++=======
++>>>>>>> android-4.14-p
        trace_sched_util_est_task(p, &p->se.avg);
  }
  
@@@ -3822,6 -3838,8 +3941,11 @@@ static inline int idle_balance(struct r
        return 0;
  }
  
++<<<<<<< HEAD
++=======
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
++>>>>>>> android-4.14-p
  static inline void
  util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
  
@@@ -5754,8 -5783,234 +5898,155 @@@ static unsigned long __cpu_norm_util(un
   * Hence - be careful when enabling DEBUG_EENV_DECISIONS
   * especially if WALT is the task signal.
   */
 -/*#define DEBUG_EENV_DECISIONS*/
 -
 -#ifdef DEBUG_EENV_DECISIONS
 -/* max of 8 levels of sched groups traversed */
 -#define EAS_EENV_DEBUG_LEVELS 16
 -
 -struct _eenv_debug {
 -      unsigned long cap;
 -      unsigned long norm_util;
 -      unsigned long cap_energy;
 -      unsigned long idle_energy;
 -      unsigned long this_energy;
 -      unsigned long this_busy_energy;
 -      unsigned long this_idle_energy;
 -      cpumask_t group_cpumask;
 -      unsigned long cpu_util[1];
 -};
 -#endif
 -
 -struct eenv_cpu {
 -      /* CPU ID, must be in cpus_mask */
 -      int     cpu_id;
 -
 -      /*
 -       * Index (into sched_group_energy::cap_states) of the OPP the
 -       * CPU needs to run at if the task is placed on it.
 -       * This includes the both active and blocked load, due to
 -       * other tasks on this CPU,  as well as the task's own
 -       * utilization.
 -      */
 -      int     cap_idx;
 -      int     cap;
 -
 -      /* Estimated system energy */
 -      unsigned long energy;
 -
 -      /* Estimated energy variation wrt EAS_CPU_PRV */
 -      long nrg_delta;
 -
 -#ifdef DEBUG_EENV_DECISIONS
 -      struct _eenv_debug *debug;
 -      int debug_idx;
 -#endif /* DEBUG_EENV_DECISIONS */
 -};
 -
 -struct energy_env {
 -      /* Utilization to move */
 -      struct task_struct      *p;
 -      unsigned long           util_delta;
 -      unsigned long           util_delta_boosted;
 -
 -      /* Mask of CPUs candidates to evaluate */
 -      cpumask_t               cpus_mask;
 -
 -      /* CPU candidates to evaluate */
 -      struct eenv_cpu *cpu;
 -      int eenv_cpu_count;
 -
 -#ifdef DEBUG_EENV_DECISIONS
 -      /* pointer to the memory block reserved
 -       * for debug on this CPU - there will be
 -       * sizeof(struct _eenv_debug) *
 -       *  (EAS_CPU_CNT * EAS_EENV_DEBUG_LEVELS)
 -       * bytes allocated here.
 -       */
 -      struct _eenv_debug *debug;
 -#endif
 -      /*
 -       * Index (into energy_env::cpu) of the morst energy efficient CPU for
 -       * the specified energy_env::task
 -       */
 -      int     next_idx;
 -      int     max_cpu_count;
 -
 -      /* Support data */
 -      struct sched_group      *sg_top;
 -      struct sched_group      *sg_cap;
 -      struct sched_group      *sg;
 -};
  
- static int cpu_util_wake(int cpu, struct task_struct *p);
+ /**
+  * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
+  * @cpu: the CPU to get the utilization of
+  *
+  * The unit of the return value must be the one of capacity so we can compare
+  * the utilization with the capacity of the CPU that is available for CFS task
+  * (ie cpu_capacity).
+  *
+  * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
+  * recent utilization of currently non-runnable tasks on a CPU. It represents
+  * the amount of utilization of a CPU in the range [0..capacity_orig] where
+  * capacity_orig is the cpu_capacity available at the highest frequency,
+  * i.e. arch_scale_cpu_capacity().
+  * The utilization of a CPU converges towards a sum equal to or less than the
+  * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
+  * the running time on this CPU scaled by capacity_curr.
+  *
+  * The estimated utilization of a CPU is defined to be the maximum between its
+  * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
+  * currently RUNNABLE on that CPU.
+  * This allows to properly represent the expected utilization of a CPU which
+  * has just got a big task running since a long sleep period. At the same time
+  * however it preserves the benefits of the "blocked utilization" in
+  * describing the potential for other tasks waking up on the same CPU.
+  *
+  * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
+  * higher than capacity_orig because of unfortunate rounding in
+  * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
+  * the average stabilizes with the new running time. We need to check that the
+  * utilization stays within the range of [0..capacity_orig] and cap it if
+  * necessary. Without utilization capping, a group could be seen as overloaded
+  * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
+  * available capacity. We allow utilization to overshoot capacity_curr (but not
+  * capacity_orig) as it useful for predicting the capacity required after task
+  * capacity_orig) as it is useful for predicting the capacity required after task
+  *
+  * Return: the (estimated) utilization for the specified CPU
+  */
+ static inline unsigned long cpu_util(int cpu)
+ {
+       struct cfs_rq *cfs_rq;
+       unsigned int util;
+ #ifdef CONFIG_SCHED_WALT
+       if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util)) {
+               u64 walt_cpu_util = cpu_rq(cpu)->cumulative_runnable_avg;
+               walt_cpu_util <<= SCHED_CAPACITY_SHIFT;
+               do_div(walt_cpu_util, walt_ravg_window);
+               return min_t(unsigned long, walt_cpu_util,
+                            capacity_orig_of(cpu));
+       }
+ #endif
+       cfs_rq = &cpu_rq(cpu)->cfs;
+       util = READ_ONCE(cfs_rq->avg.util_avg);
+       if (sched_feat(UTIL_EST))
+               util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+       return min_t(unsigned long, util, capacity_orig_of(cpu));
+ }
+ static inline unsigned long cpu_util_freq(int cpu)
+ {
+ #ifdef CONFIG_SCHED_WALT
+       u64 walt_cpu_util;
+       if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util))
+               return cpu_util(cpu);
+       walt_cpu_util = cpu_rq(cpu)->prev_runnable_sum;
+       walt_cpu_util <<= SCHED_CAPACITY_SHIFT;
+       do_div(walt_cpu_util, walt_ravg_window);
+       return min_t(unsigned long, walt_cpu_util, capacity_orig_of(cpu));
+ #else
+       return cpu_util(cpu);
+ #endif
+ }
+ /*
+  * cpu_util_wake: Compute CPU utilization with any contributions from
+  * the waking task p removed.
+  */
+ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
+ {
+       struct cfs_rq *cfs_rq;
+       unsigned int util;
+ #ifdef CONFIG_SCHED_WALT
+       /*
+        * WALT does not decay idle tasks in the same manner
+        * as PELT, so it makes little sense to subtract task
+        * utilization from cpu utilization. Instead just use
+        * cpu_util for this case.
+        */
+       if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util))
+               return cpu_util(cpu);
+ #endif
+       /* Task has no contribution or is new */
+       if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+               return cpu_util(cpu);
+       cfs_rq = &cpu_rq(cpu)->cfs;
+       util = READ_ONCE(cfs_rq->avg.util_avg);
+       /* Discount task's blocked util from CPU's util */
+       util -= min_t(unsigned int, util, task_util(p));
+       /*
+        * Covered cases:
+        *
+        * a) if *p is the only task sleeping on this CPU, then:
+        *      cpu_util (== task_util) > util_est (== 0)
+        *    and thus we return:
+        *      cpu_util_wake = (cpu_util - task_util) = 0
+        *
+        * b) if other tasks are SLEEPING on this CPU, which is now exiting
+        *    IDLE, then:
+        *      cpu_util >= task_util
+        *      cpu_util > util_est (== 0)
+        *    and thus we discount *p's blocked utilization to return:
+        *      cpu_util_wake = (cpu_util - task_util) >= 0
+        *
+        * c) if other tasks are RUNNABLE on that CPU and
+        *      util_est > cpu_util
+        *    then we use util_est since it returns a more restrictive
+        *    estimation of the spare capacity on that CPU, by just
+        *    considering the expected utilization of tasks already
+        *    runnable on that CPU.
+        *
+        * Cases a) and b) are covered by the above code, while case c) is
+        * covered by the following code when estimated utilization is
+        * enabled.
+        */
+       if (sched_feat(UTIL_EST))
+               util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+       /*
+        * Utilization (estimated) can exceed the CPU capacity, thus let's
+        * clamp to the maximum CPU capacity to ensure consistency with
+        * the cpu_util call.
+        */
+       return min_t(unsigned long, util, capacity_orig_of(cpu));
+ }
  
  static unsigned long group_max_util(struct energy_env *eenv, int cpu_idx)
  {
@@@ -6431,10 -6673,10 +6720,10 @@@ boosted_cpu_util(int cpu, unsigned lon
        return util + margin;
  }
  
 -static inline unsigned long
 +unsigned long
  boosted_task_util(struct task_struct *task)
  {
-       unsigned long util = task_util(task);
+       unsigned long util = task_util_est(task);
        long margin = schedtune_task_margin(task);
  
        trace_sched_boost_task(task, util, margin);
@@@ -6968,72 -7212,6 +7259,75 @@@ static int select_idle_sibling(struct t
        return select_idle_sibling_cstate_aware(p, prev, target);
  }
  
++<<<<<<< HEAD
 +/*
 + * cpu_util_wake: Compute cpu utilization with any contributions from
 + * the waking task p removed.
 + */
 +static int cpu_util_wake(int cpu, struct task_struct *p)
 +{
 +      struct cfs_rq *cfs_rq;
 +      unsigned int util;
 +
 +#ifdef CONFIG_SCHED_WALT
 +      /*
 +       * WALT does not decay idle tasks in the same manner
 +       * as PELT, so it makes little sense to subtract task
 +       * utilization from cpu utilization. Instead just use
 +       * cpu_util for this case.
 +       */
 +      if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
 +              return cpu_util(cpu);
 +#endif
 +      /* Task has no contribution or is new */
 +      if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
 +              return cpu_util(cpu);
 +
 +      cfs_rq = &cpu_rq(cpu)->cfs;
 +      util = READ_ONCE(cfs_rq->avg.util_avg);
 +
 +      /* Discount task's blocked util from CPU's util */
 +      util -= min_t(unsigned int, util, task_util(p));
 +
 +      /*
 +       * Covered cases:
 +       *
 +       * a) if *p is the only task sleeping on this CPU, then:
 +       *      cpu_util (== task_util) > util_est (== 0)
 +       *    and thus we return:
 +       *      cpu_util_wake = (cpu_util - task_util) = 0
 +       *
 +       * b) if other tasks are SLEEPING on this CPU, which is now exiting
 +       *    IDLE, then:
 +       *      cpu_util >= task_util
 +       *      cpu_util > util_est (== 0)
 +       *    and thus we discount *p's blocked utilization to return:
 +       *      cpu_util_wake = (cpu_util - task_util) >= 0
 +       *
 +       * c) if other tasks are RUNNABLE on that CPU and
 +       *      util_est > cpu_util
 +       *    then we use util_est since it returns a more restrictive
 +       *    estimation of the spare capacity on that CPU, by just
 +       *    considering the expected utilization of tasks already
 +       *    runnable on that CPU.
 +       *
 +       * Cases a) and b) are covered by the above code, while case c) is
 +       * covered by the following code when estimated utilization is
 +       * enabled.
 +       */
 +      if (sched_feat(UTIL_EST))
 +              util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
 +
 +      /*
 +       * Utilization (estimated) can exceed the CPU capacity, thus let's
 +       * clamp to the maximum CPU capacity to ensure consistency with
 +       * the cpu_util call.
 +       */
 +      return min_t(unsigned long, util, capacity_orig_of(cpu));
 +}
 +
++=======
++>>>>>>> android-4.14-p
  static inline int task_fits_capacity(struct task_struct *p, long capacity)
  {
        return capacity * 1024 > boosted_task_util(p) * capacity_margin;
@@@ -7046,10 -7224,9 +7340,9 @@@ int start_cpu(bool boosted
        return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
  }
  
 -static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 +int find_best_target(struct task_struct *p, int *backup_cpu,
                                   bool boosted, bool prefer_idle)
  {
-       unsigned long best_idle_min_cap_orig = ULONG_MAX;
        unsigned long min_util = boosted_task_util(p);
        unsigned long target_capacity = ULONG_MAX;
        unsigned long min_wake_util = ULONG_MAX;
@@@ -9239,23 -9452,17 +9589,31 @@@ static inline void update_sg_lb_stats(s
                        sgs->idle_cpus++;
  
                if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
-                   !sgs->group_misfit_task && rq_has_misfit(rq))
-                       sgs->group_misfit_task = capacity_of(i);
+                   sgs->group_misfit_task_load < rq->misfit_task_load) {
+                       sgs->group_misfit_task_load = rq->misfit_task_load;
+                       *overload = 1;
+               }
  
 -              if (cpu_overutilized(i)) {
 -                      *overutilized = true;
 +              if (sched_feat(EXYNOS_MS)) {
 +                      if (lbt_overutilized(i, env->sd->level)) {
 +                              *overutilized = true;
  
++<<<<<<< HEAD
 +                              if (rq_has_misfit(rq))
 +                                      *misfit_task = true;
 +                      }
 +              } else {
 +                      if (cpu_overutilized(i)) {
 +                              *overutilized = true;
 +
 +                              if (rq_has_misfit(rq))
 +                                      *misfit_task = true;
 +                      }
++=======
+                       if (rq->misfit_task_load)
+                               *misfit_task = true;
++>>>>>>> android-4.14-p
                }
        }
  
@@@ -11077,13 -11338,9 +11499,13 @@@ static void task_tick_fair(struct rq *r
        if (static_branch_unlikely(&sched_numa_balancing))
                task_tick_numa(rq, curr);
  
-       update_misfit_task(rq, curr);
+       update_misfit_status(curr, rq);
  
        update_overutilized_status(rq);
 +
 +#ifdef CONFIG_EXYNOS_PSTATE_MODE_CHANGER
 +      exynos_emc_update(rq->cpu);
 +#endif
  }
  
  /*
index ea53adbf81bdeb4dad96115ca7c041f2f76130cb,dbade300ef8ca7ea53e2258d2c9f5a561b55b770..48867ad4e7ff444969995906be98ceda71788083
@@@ -119,4 -119,13 +124,17 @@@ SCHED_FEAT(EAS_PREFER_IDLE, true
  SCHED_FEAT(FIND_BEST_TARGET, true)
  SCHED_FEAT(FBT_STRICT_ORDER, true)
  
++<<<<<<< HEAD
 +SCHED_FEAT(EXYNOS_MS, true)
++=======
+ /*
+  * Apply schedtune boost hold to tasks of all sched classes.
+  * If enabled, schedtune will hold the boost applied to a CPU
+  * for 50ms regardless of task activation - if the task is
+  * still running 50ms later, the boost hold expires and schedtune
+  * boost will expire as soon as the task stops.
+  * If disabled, this behaviour will only apply to tasks of the
+  * RT class.
+  */
+ SCHED_FEAT(SCHEDTUNE_BOOST_HOLD_ALL, false)
++>>>>>>> android-4.14-p
Simple merge
index b6d66b582d7b884624cd13abae9ab33781fa9252,58c7498af831dc77ed6220bf0594bfb65f4cdaaf..42c7c811706e0ad0da922fd464542a8f3ff53dec
  
  #include <linux/slab.h>
  #include <linux/irq_work.h>
++<<<<<<< HEAD
 +#include <linux/ems.h>
++=======
+ #include "tune.h"
++>>>>>>> android-4.14-p
  
  #include "walt.h"
 +#include <trace/events/sched.h>
 +
 +#ifdef CONFIG_SCHED_USE_FLUID_RT
 +struct frt_dom {
 +      unsigned int            coverage_ratio;
 +      unsigned int            coverage_thr;
 +      unsigned int            active_ratio;
 +      unsigned int            active_thr;
 +      int                     coregroup;
 +      struct cpumask          cpus;
 +
 +      /* It is updated to reflect the system idle situation */
 +      struct cpumask          *activated_cpus;
 +
 +      struct list_head        list;
 +      struct frt_dom          *next;
 +      /* kobject for sysfs group */
 +      struct kobject          kobj;
 +};
 +struct cpumask activated_mask;
 +
 +LIST_HEAD(frt_list);
 +DEFINE_RAW_SPINLOCK(frt_lock);
 +
 +DEFINE_PER_CPU_SHARED_ALIGNED(struct frt_dom *, frt_rqs);
 +
 +static struct kobject *frt_kobj;
 +#define RATIO_SCALE_SHIFT     10
 +#define cpu_util(rq) (rq->cfs.avg.util_avg + rq->rt.avg.util_avg)
 +#define ratio_scale(v, r) (((v) * (r) * 10) >> RATIO_SCALE_SHIFT)
 +
 +static int frt_set_coverage_ratio(int cpu);
 +static int frt_set_active_ratio(int cpu);
 +struct frt_attr {
 +      struct attribute attr;
 +      ssize_t (*show)(struct kobject *, char *);
 +      ssize_t (*store)(struct kobject *, const char *, size_t count);
 +};
 +
 +#define frt_attr_rw(_name)                            \
 +static struct frt_attr _name##_attr =                 \
 +__ATTR(_name, 0644, show_##_name, store_##_name)
 +
 +#define frt_show(_name)                                                               \
 +static ssize_t show_##_name(struct kobject *k, char *buf)                     \
 +{                                                                             \
 +      struct frt_dom *dom = container_of(k, struct frt_dom, kobj);            \
 +                                                                              \
 +      return sprintf(buf, "%u\n", (unsigned int)dom->_name);                  \
 +}
 +
 +#define frt_store(_name, _type, _max)                                         \
 +static ssize_t store_##_name(struct kobject *k, const char *buf, size_t count)        \
 +{                                                                             \
 +      unsigned int val;                                                       \
 +      struct frt_dom *dom = container_of(k, struct frt_dom, kobj);            \
 +                                                                              \
 +      if (!sscanf(buf, "%u", &val))                                           \
 +              return -EINVAL;                                                 \
 +                                                                              \
 +      val = val > _max ? _max : val;                                          \
 +      dom->_name = (_type)val;                                                \
 +      frt_set_##_name(cpumask_first(&dom->cpus));                             \
 +                                                                              \
 +      return count;                                                           \
 +}
 +
 +static ssize_t show_coverage_ratio(struct kobject *k, char *buf)
 +{
 +      struct frt_dom *dom = container_of(k, struct frt_dom, kobj);
 +
 +      return sprintf(buf, "%u (%u)\n", dom->coverage_ratio, dom->coverage_thr);
 +}
 +
 +static ssize_t show_active_ratio(struct kobject *k, char *buf)
 +{
 +      struct frt_dom *dom = container_of(k, struct frt_dom, kobj);
 +
 +      return sprintf(buf, "%u (%u)\n", dom->active_ratio, dom->active_thr);
 +}
 +
 +frt_store(coverage_ratio, int, 100);
 +frt_attr_rw(coverage_ratio);
 +frt_store(active_ratio, int, 100);
 +frt_attr_rw(active_ratio);
 +
 +static ssize_t show(struct kobject *kobj, struct attribute *at, char *buf)
 +{
 +      struct frt_attr *frtattr = container_of(at, struct frt_attr, attr);
 +
 +      return frtattr->show(kobj, buf);
 +}
 +
 +static ssize_t store(struct kobject *kobj, struct attribute *at,
 +                   const char *buf, size_t count)
 +{
 +      struct frt_attr *frtattr = container_of(at, struct frt_attr, attr);
 +
 +      return frtattr->store(kobj, buf, count);
 +}
 +
 +static const struct sysfs_ops frt_sysfs_ops = {
 +      .show   = show,
 +      .store  = store,
 +};
 +
 +static struct attribute *frt_attrs[] = {
 +      &coverage_ratio_attr.attr,
 +      &active_ratio_attr.attr,
 +      NULL
 +};
 +
 +static struct kobj_type ktype_frt = {
 +      .sysfs_ops      = &frt_sysfs_ops,
 +      .default_attrs  = frt_attrs,
 +};
 +
 +static int frt_find_prefer_cpu(struct task_struct *task)
 +{
 +      int cpu, allowed_cpu = 0;
 +      unsigned int coverage_thr;
 +      struct frt_dom *dom;
 +
 +      list_for_each_entry(dom, &frt_list, list) {
 +              coverage_thr = per_cpu(frt_rqs, cpumask_first(&dom->cpus))->coverage_thr;
 +              for_each_cpu_and(cpu, &task->cpus_allowed, &dom->cpus) {
 +                      allowed_cpu = cpu;
 +                      if (task->rt.avg.util_avg < coverage_thr)
 +                              return allowed_cpu;
 +              }
 +      }
 +      return allowed_cpu;
 +}
 +
 +static int frt_set_active_ratio(int cpu)
 +{
 +      unsigned long capacity;
 +      struct frt_dom *dom = per_cpu(frt_rqs, cpu);
 +
 +      if (!dom || !cpu_active(cpu))
 +              return -1;
 +
 +      capacity = get_cpu_max_capacity(cpu) *
 +                      cpumask_weight(cpu_coregroup_mask(cpu));
 +      dom->active_thr = ratio_scale(capacity, dom->active_ratio);
 +
 +      return 0;
 +}
 +
 +static int frt_set_coverage_ratio(int cpu)
 +{
 +      unsigned long capacity;
 +      struct frt_dom *dom = per_cpu(frt_rqs, cpu);
 +
 +      if (!dom || !cpu_active(cpu))
 +              return -1;
 +
 +      capacity = get_cpu_max_capacity(cpu);
 +      dom->coverage_thr = ratio_scale(capacity, dom->coverage_ratio);
 +
 +      return 0;
 +}
 +
 +static const struct cpumask *get_activated_cpus(void)
 +{
 +      struct frt_dom *dom = per_cpu(frt_rqs, 0);
 +      if (dom)
 +              return dom->activated_cpus;
 +      return cpu_active_mask;
 +}
 +
 +static void update_activated_cpus(void)
 +{
 +      struct frt_dom *dom, *prev_idle_dom = NULL;
 +      struct cpumask mask;
 +      unsigned long flags;
 +
 +      if (!raw_spin_trylock_irqsave(&frt_lock, flags))
 +              return;
 +
 +      cpumask_setall(&mask);
 +      list_for_each_entry_reverse(dom, &frt_list, list) {
 +              unsigned long dom_util_sum = 0;
 +              unsigned long dom_active_thr = 0;
 +              unsigned long capacity;
 +              struct cpumask active_cpus;
 +              int first_cpu, cpu;
 +
 +              cpumask_and(&active_cpus, &dom->cpus, cpu_active_mask);
 +              first_cpu = cpumask_first(&active_cpus);
 +              /* all CPUs of this domain are offline */
 +              if (first_cpu >= nr_cpu_ids)
 +                      continue;
 +
 +              for_each_cpu(cpu, &active_cpus) {
 +                      struct rq *rq = cpu_rq(cpu);
 +                      dom_util_sum += cpu_util(rq);
 +              }
 +
 +              capacity = get_cpu_max_capacity(first_cpu) * cpumask_weight(&active_cpus);
 +              dom_active_thr = ratio_scale(capacity, dom->active_ratio);
 +
 +              /* domain is idle */
 +              if (dom_util_sum < dom_active_thr) {
 +                      /* if the previous domain is also idle, clear its cpus from the mask */
 +                      if (prev_idle_dom)
 +                              cpumask_andnot(&mask, &mask, &prev_idle_dom->cpus);
 +                      prev_idle_dom = dom;
 +              }
 +
 +              trace_sched_fluid_activated_cpus(first_cpu, dom_util_sum,
 +                      dom_active_thr, *(unsigned int *)cpumask_bits(&mask));
 +
 +              /* first domain in the list (contains cpu0): update activated_cpus */
 +              if (first_cpu == 0)
 +                      cpumask_copy(dom->activated_cpus, &mask);
 +      }
 +      raw_spin_unlock_irqrestore(&frt_lock, flags);
 +}
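update_activated_cpus() scans the domains from the biggest cluster down and only removes an idle domain's CPUs from the activated mask once the next (smaller) domain also turns out to be idle, so at least one idle cluster always stays activated for RT placement. A minimal userspace sketch of that mask accumulation follows; the CPU bitmasks, utilisation sums and thresholds are invented numbers, not values computed by this patch:

/* toy model of the activated-cpus mask accumulation; illustrative only */
#include <stdio.h>

struct toy_dom { unsigned int cpu_bits; unsigned long util_sum, active_thr; };

int main(void)
{
	/* index 0 = big cluster (CPUs 4-7), index 1 = little cluster (CPUs 0-3) */
	const struct toy_dom doms[] = {
		{ 0xf0,  40, 1800 },	/* big: idle    */
		{ 0x0f,  80,  600 },	/* little: idle */
	};
	const struct toy_dom *prev_idle = NULL;
	unsigned int mask = 0xff;	/* start from "all CPUs activated" */

	for (int i = 0; i < 2; i++) {
		if (doms[i].util_sum < doms[i].active_thr) {
			if (prev_idle)	/* two idle domains in a row */
				mask &= ~prev_idle->cpu_bits;
			prev_idle = &doms[i];
		}
	}
	/* both clusters idle -> the big cluster is dropped, prints 0x0f */
	printf("activated mask: 0x%02x\n", mask);
	return 0;
}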
 +
 +static int __init frt_sysfs_init(void)
 +{
 +      struct frt_dom *dom;
 +
 +      if (list_empty(&frt_list))
 +              return 0;
 +
 +      frt_kobj = kobject_create_and_add("frt", ems_kobj);
 +      if (!frt_kobj)
 +              goto out;
 +
 +      /* Add frt sysfs node for each coregroup */
 +      list_for_each_entry(dom, &frt_list, list) {
 +              int ret;
 +
 +              ret = kobject_init_and_add(&dom->kobj, &ktype_frt,
 +                              frt_kobj, "coregroup%d", dom->coregroup);
 +              if (ret)
 +                      goto out;
 +      }
 +
 +      return 0;
 +
 +out:
 +      pr_err("FRT(%s): failed to create sysfs node\n", __func__);
 +      return -EINVAL;
 +}
 +
 +static void frt_parse_dt(struct device_node *dn, struct frt_dom *dom, int cnt)
 +{
 +      struct device_node *frt, *coregroup;
 +      char name[15];
 +
 +      frt = of_get_child_by_name(dn, "frt");
 +      if (!frt)
 +              goto disable;
 +
 +      snprintf(name, sizeof(name), "coregroup%d", cnt);
 +      coregroup = of_get_child_by_name(frt, name);
 +      if (!coregroup)
 +              goto disable;
 +      dom->coregroup = cnt;
 +
 +      of_property_read_u32(coregroup, "coverage-ratio", &dom->coverage_ratio);
 +      if (!dom->coverage_ratio)
 +              dom->coverage_ratio = 100;
 +
 +      of_property_read_u32(coregroup, "active-ratio", &dom->active_ratio);
 +      if (!dom->active_ratio)
 +              dom->active_thr = 0;
 +
 +      return;
 +
 +disable:
 +      dom->coregroup = cnt;
 +      dom->coverage_ratio = 100;
 +      dom->active_thr = 0;
 +      pr_err("FRT(%s): failed to parse frt node\n", __func__);
 +}
 +
 +static int __init init_frt(void)
 +{
 +      struct frt_dom *dom, *prev = NULL, *head;
 +      struct device_node *dn;
 +      int cpu, tcpu, cnt = 0;
 +
 +      dn = of_find_node_by_path("/cpus/ems");
 +      if (!dn)
 +              return 0;
 +
 +      INIT_LIST_HEAD(&frt_list);
 +      cpumask_setall(&activated_mask);
 +
 +      for_each_possible_cpu(cpu) {
 +              if (cpu != cpumask_first(cpu_coregroup_mask(cpu)))
 +                      continue;
 +
 +              dom = kzalloc(sizeof(struct frt_dom), GFP_KERNEL);
 +              if (!dom) {
 +                      pr_err("FRT(%s): failed to allocate dom\n", __func__);
 +                      goto put_node;
 +              }
 +
 +              if (cpu == 0)
 +                      head = dom;
 +
 +              dom->activated_cpus = &activated_mask;
 +
 +              cpumask_copy(&dom->cpus, cpu_coregroup_mask(cpu));
 +
 +              frt_parse_dt(dn, dom, cnt++);
 +
 +              dom->next = head;
 +              if (prev)
 +                      prev->next = dom;
 +              prev = dom;
 +
 +              for_each_cpu(tcpu, &dom->cpus)
 +                      per_cpu(frt_rqs, tcpu) = dom;
 +
 +              frt_set_coverage_ratio(cpu);
 +              frt_set_active_ratio(cpu);
 +
 +              list_add_tail(&dom->list, &frt_list);
 +      }
 +      frt_sysfs_init();
 +
 +put_node:
 +      of_node_put(dn);
 +
 +      return 0;
 +
 +}
 +late_initcall(init_frt);
 +#else
 +static inline void update_activated_cpus(void) { }
 +#endif
  
  int sched_rr_timeslice = RR_TIMESLICE;
  int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
index 3982c027b6910dfc986b370059f8aa1316f568a1,ac7d489be8cedde98fe3c105b4150f353116e5fd..a5b64079e36ff1446aff247476cf9d5a94d97d28
@@@ -1,6 -1,7 +1,10 @@@
  /* SPDX-License-Identifier: GPL-2.0 */
  /* Generated by Documentation/scheduler/sched-pelt; do not modify. */
  
  #ifdef CONFIG_PELT_UTIL_HALFLIFE_32
  static const u32 runnable_avg_yN_inv[] = {
        0xffffffff,0xfa83b2da,0xf5257d14,0xefe4b99a,
@@@ -23,11 -24,6 +27,14 @@@ static const u32 runnable_avg_yN_sum[] 
  #define LOAD_AVG_MAX 47742
  #define LOAD_AVG_MAX_N 345
  
 +static const u32 __accumulated_sum_N32[] = {
 +           0, 23371, 35056, 40899, 43820, 45281,
 +       46011, 46376, 46559, 46650, 46696, 46719,
 +};
 +
  #endif
  
  #ifdef CONFIG_PELT_UTIL_HALFLIFE_16
@@@ -47,15 -43,6 +54,18 @@@ static const u32 runnable_avg_yN_sum[] 
  #define LOAD_AVG_MAX 24152
  #define LOAD_AVG_MAX_N 517
  
 +static const u32 __accumulated_sum_N32[] = {
 +           0, 22731, 34096, 39779, 42620, 44041,
 +       44751, 45106, 45284, 45373, 45417, 45439,
 +       45450, 45456, 45459, 45460, 45461, 45461,
 +       45461, 45461, 45461, 45461, 45461, 45461,
 +       45461, 45461, 45461, 45461, 45461, 45461,
 +       45461, 45461, 45461, 45461,
 +};
 +
  #endif
  
  #ifdef CONFIG_PELT_UTIL_HALFLIFE_8
@@@ -72,21 -59,4 +82,25 @@@ static const u32 runnable_avg_yN_sum[] 
  #define LOAD_AVG_MAX 12337
  #define LOAD_AVG_MAX_N 603
  
 +static const u32 __accumulated_sum_N32[] = {
 +           0, 16507, 24760, 28887, 30950, 31982,
 +       32498, 32756, 32885, 32949, 32981, 32997,
 +       33005, 33009, 33011, 33012, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013, 33013,
 +       33013, 33013, 33013, 33013, 33013,
 +};
 +
 +#endif
 +
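The tables above follow the usual PELT construction: with a halflife of h periods the per-period decay factor is y = 2^(-1/h), and the maximum accumulable load converges to roughly 1024 / (1 - y). The short self-contained check below lands close to the LOAD_AVG_MAX figures in the tables (47742 / 24152 / 12337); the residual difference comes from the integer arithmetic of the generator script, and this is an independent sanity check, not the generator itself:

/* approximate LOAD_AVG_MAX for halflife 32/16/8; compile with -lm */
#include <stdio.h>
#include <math.h>

int main(void)
{
	const int halflife[] = { 32, 16, 8 };

	for (int i = 0; i < 3; i++) {
		double y = pow(2.0, -1.0 / halflife[i]);

		/* expect values close to 47742, 24152 and 12337 respectively */
		printf("halflife %2d: y = %.6f, ~LOAD_AVG_MAX = %.0f\n",
		       halflife[i], y, 1024.0 / (1.0 - y));
	}
	return 0;
}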
index 1f91980130db24da2e52e38ceef1eeea38123b25,c3ea9a3ad3a6416e813d6e3ab0a4b86b2b382085..8f60006c2631365224b569e4aa655b1a94a14c4e
@@@ -1814,103 -1778,7 +1837,107 @@@ extern unsigned int sysctl_sched_use_wa
  extern unsigned int walt_ravg_window;
  extern bool walt_disabled;
  
 +#ifdef CONFIG_SCHED_WALT
 +#define walt_util(util_var, demand_sum) {\
 +      u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
 +      do_div(sum, walt_ravg_window);\
 +      util_var = (typeof(util_var))sum;\
 +      }
 +#endif
 +
 +/**
 + * __cpu_util() - Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
 + * @cpu: the CPU to get the utilization of
 + * @delta: signed utilization delta added to the current value before clamping
 + *
 + * The unit of the return value must be the one of capacity so we can compare
 + * the utilization with the capacity of the CPU that is available for CFS task
 + * (ie cpu_capacity).
 + *
 + * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
 + * recent utilization of currently non-runnable tasks on a CPU. It represents
 + * the amount of utilization of a CPU in the range [0..capacity_orig] where
 + * capacity_orig is the cpu_capacity available at the highest frequency
 + * (arch_scale_freq_capacity()).
 + * The utilization of a CPU converges towards a sum equal to or less than the
 + * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
 + * the running time on this CPU scaled by capacity_curr.
 + *
 + * The estimated utilization of a CPU is defined to be the maximum between its
 + * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
 + * currently RUNNABLE on that CPU.
 + * This allows us to properly represent the expected utilization of a CPU which
 + * has just started running a big task after a long sleep period. At the same time
 + * however it preserves the benefits of the "blocked utilization" in
 + * describing the potential for other tasks waking up on the same CPU.
 + *
 + * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
 + * higher than capacity_orig because of unfortunate rounding in
 + * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
 + * the average stabilizes with the new running time. We need to check that the
 + * utilization stays within the range of [0..capacity_orig] and cap it if
 + * necessary. Without utilization capping, a group could be seen as overloaded
 + * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
 + * available capacity. We allow utilization to overshoot capacity_curr (but not
 + * capacity_orig) as it is useful for predicting the capacity required after task
 + * migrations (scheduler-driven DVFS).
 + *
 + * Return: the (estimated) utilization for the specified CPU
 + */
 +static inline unsigned long __cpu_util(int cpu, int delta)
 +{
 +      struct cfs_rq *cfs_rq;
 +      unsigned int util;
 +
 +      cfs_rq = &cpu_rq(cpu)->cfs;
 +      util = READ_ONCE(cfs_rq->avg.util_avg);
 +
 +#ifdef CONFIG_SCHED_WALT
 +      if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
 +              walt_util(util, cpu_rq(cpu)->cumulative_runnable_avg);
 +      }
 +#endif
 +      if (sched_feat(UTIL_EST))
 +              util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
 +
 +      delta += util;
 +      if (delta < 0)
 +              return 0;
 +
 +      return min_t(unsigned long, delta, capacity_orig_of(cpu));
 +}
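The clamping described in the comment above is easy to see with concrete numbers: a raw util_avg of 1240 on a CPU whose capacity_orig is 1024 (the "121%" case) is reported as 1024, util_est can only raise the estimate, and a negative delta can never drive the result below zero. A minimal userspace stand-in follows, with made-up values and without the WALT path:

/* toy clamp mirroring __cpu_util(); not the kernel helpers themselves */
#include <stdio.h>

static unsigned long toy_cpu_util(long util_avg, long util_est, long delta,
				  unsigned long capacity_orig)
{
	long util = util_avg > util_est ? util_avg : util_est;

	util += delta;
	if (util < 0)
		return 0;
	return (unsigned long)util < capacity_orig ?
			(unsigned long)util : capacity_orig;
}

int main(void)
{
	printf("%lu\n", toy_cpu_util(1240, 900,    0, 1024));	/* 1024: capped to capacity_orig */
	printf("%lu\n", toy_cpu_util( 300, 450,    0, 1024));	/*  450: util_est wins           */
	printf("%lu\n", toy_cpu_util( 100,   0, -200, 1024));	/*    0: floored at zero         */
	return 0;
}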
 +
 +static inline unsigned long cpu_util(int cpu)
 +{
 +      return __cpu_util(cpu, 0);
 +}
 +
 +static inline unsigned long cpu_util_freq(int cpu)
 +{
 +      struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
 +      unsigned long capacity = capacity_orig_of(cpu);
 +      unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
 +
 +#ifdef CONFIG_SCHED_WALT
 +      if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
 +              walt_util(util, cpu_rq(cpu)->prev_runnable_sum);
 +      }
 +#endif
 +      if (sched_feat(UTIL_EST)) {
 +              util = max_t(unsigned long, util,
 +                           READ_ONCE(cfs_rq->avg.util_est.enqueued));
 +      }
 +
 +      return (util >= capacity) ? capacity : util;
 +}
 +
 +unsigned long _task_util_est(struct task_struct *p);
 +unsigned long task_util_est(struct task_struct *p);
 +
 +#endif /* CONFIG_SMP */
  
  static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
  {
Simple merge
Simple merge
Simple merge
index eda329d99cb8886c448e2cbc29383d38c44d4423,f0c53b815ce9dc5d15e7d4fe833248913676a967..00d8c083c762ef82190a9b80b2cb54a543798d5d
@@@ -1018,26 -1075,39 +1075,52 @@@ bool tick_nohz_idle_got_tick(void
   *
   * Called from power state control code with interrupts disabled
   */
- ktime_t tick_nohz_get_sleep_length(void)
+ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
  {
+       struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+       int cpu = smp_processor_id();
+       /*
+        * The idle entry time is expected to be a sufficient approximation of
+        * the current time at this point.
+        */
+       ktime_t now = ts->idle_entrytime;
+       ktime_t next_event;
  
-       return ts->sleep_length;
+       WARN_ON_ONCE(!ts->inidle);
+       *delta_next = ktime_sub(dev->next_event, now);
+       if (!can_stop_idle_tick(cpu, ts))
+               return *delta_next;
+       next_event = tick_nohz_next_event(ts, cpu);
+       if (!next_event)
+               return *delta_next;
+       /*
+        * If the next highres timer to expire is earlier than next_event, the
+        * idle governor needs to know that.
+        */
+       next_event = min_t(u64, next_event,
+                          hrtimer_next_event_without(&ts->sched_timer));
+       return ktime_sub(next_event, now);
  }
  
 +/**
 + * tick_nohz_get_sleep_length_cpu - return the length of the current sleep
 + * for a particular CPU.
 + *
 + * Called from power state control code with interrupts disabled
 + */
 +ktime_t tick_nohz_get_sleep_length_cpu(int cpu)
 +{
 +      struct tick_sched *ts = tick_get_tick_sched(cpu);
 +
 +      return ts->sleep_length;
 +}
 +
  /**
   * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
   * for a particular CPU.
Simple merge
Simple merge
Simple merge
diff --cc mm/gup.c
Simple merge
Simple merge
diff --cc mm/migrate.c
Simple merge
diff --cc mm/page_alloc.c
Simple merge
diff --cc mm/shmem.c
Simple merge
Simple merge
Simple merge
index 8a64a4ac148c08fdaf10266649fd64b288df3bbd,ebfbc3f1be4286ed13ae6eeae49970b3a46422d2..38c74b093caf4f2a8156b9bce8e5ca686cee7a65
mode 100755,100644..100755
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge