arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
+ifndef CONFIG_ARCH_EXYNOS
+arm64-obj-y += topology.o
+endif
+
ifeq ($(CONFIG_KVM),y)
arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
endif
extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
}
++<<<<<<< HEAD
+#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
+ dss_binder_transaction(reply, t, t->from ? t->from : thread, target_node ? target_node->debug_id : 0);
+#endif
++=======
++>>>>>>> android-4.14-p
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
if (IS_ERR_OR_NULL(buffer)) {
if (PTR_ERR(buffer) == -EPERM) {
binder_user_error(
++<<<<<<< HEAD
+ "%d:%d(%s:%s) BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
+ (u64)data_ptr);
+ } else {
+ binder_user_error(
+ "%d:%d(%s:%s) BC_FREE_BUFFER u%016llx no match\n",
+ proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
++=======
+ "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ proc->pid, thread->pid,
+ (u64)data_ptr);
+ } else {
+ binder_user_error(
+ "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ proc->pid, thread->pid,
++>>>>>>> android-4.14-p
(u64)data_ptr);
}
break;
return 0;
err_bad_arg:
++<<<<<<< HEAD
+ pr_err("%s: %d(%s) %lx-%lx %s failed %d\n", __func__,
+ proc->pid, proc->tsk->comm, vma->vm_start, vma->vm_end, failure_string, ret);
++=======
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
++>>>>>>> android-4.14-p
return ret;
}
struct binder_proc *proc;
struct binder_device *binder_dev;
++<<<<<<< HEAD
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d (%s:%s)\n", __func__,
+ current->group_leader->pid, current->pid,
+ current->group_leader->comm, current->comm);
++=======
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
+ current->group_leader->pid, current->pid);
++>>>>>>> android-4.14-p
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return vma ? -ENOMEM : -ESRCH;
}
++<<<<<<< HEAD
++=======
+ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+ {
+ if (vma)
+ alloc->vma_vm_mm = vma->vm_mm;
+ /*
+ * If we see that alloc->vma is not NULL, the buffer data structures are
+ * set up completely. See the pairing smp_rmb() in binder_alloc_get_vma().
+ * We also want to guarantee that the new alloc->vma_vm_mm is always
+ * visible if alloc->vma is set.
+ */
+ smp_wmb();
+ alloc->vma = vma;
+ }
+
+ static inline struct vm_area_struct *binder_alloc_get_vma(
+ struct binder_alloc *alloc)
+ {
+ struct vm_area_struct *vma = NULL;
+
+ if (alloc->vma) {
+ /* Look at description in binder_alloc_set_vma */
+ smp_rmb();
+ vma = alloc->vma;
+ }
+ return vma;
+ }
+
++>>>>>>> android-4.14-p
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
++<<<<<<< HEAD
+
+ mm = alloc->vma_vm_mm;
+ if (!mmget_not_zero(mm))
+ goto err_mmget;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+
+ vma = alloc->vma;
++=======
+ vma = binder_alloc_get_vma(alloc);
+ if (vma) {
+ if (!mmget_not_zero(alloc->vma_vm_mm))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+ }
++>>>>>>> android-4.14-p
list_lru_isolate(lru, item);
spin_unlock(lock);
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
+ dev->power.direct_complete = false;
async_error = -EBUSY;
+ if (dev->power.direct_complete)
+ dev->power.is_suspend_aborted = true;
goto Complete;
}
struct mct_clock_event_device *mevt;
mevt = container_of(evt, struct mct_clock_event_device, evt);
++<<<<<<< HEAD
+ exynos4_mct_tick_stop(mevt, 1);
++=======
+ exynos4_mct_tick_stop(mevt);
+ exynos4_mct_tick_clear(mevt);
++>>>>>>> android-4.14-p
return 0;
}
struct mct_clock_event_device *mevt = dev_id;
struct clock_event_device *evt = &mevt->evt;
++<<<<<<< HEAD
+ exynos4_mct_tick_stop(mevt, 0);
++=======
+ /*
+ * This is for supporting oneshot mode: the MCT keeps generating
+ * interrupts periodically unless it is stopped explicitly.
+ */
+ if (!clockevent_state_periodic(&mevt->evt))
+ exynos4_mct_tick_stop(mevt);
+
+ exynos4_mct_tick_clear(mevt);
++>>>>>>> android-4.14-p
evt->event_handler(evt);
* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
++<<<<<<< HEAD
+ struct kobj_attribute *attr, char *buf)
++=======
+ struct kobj_attribute *attr, char *buf)
++>>>>>>> android-4.14-p
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
++<<<<<<< HEAD
+ const char *buf, size_t count)
++=======
+ const char *buf, size_t count)
++>>>>>>> android-4.14-p
{
int ret, enable;
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct exynos_adc *info = iio_priv(indio_dev);
+ int ret;
- if (IS_REACHABLE(CONFIG_INPUT)) {
+ if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
free_irq(info->tsirq, info);
input_unregister_device(info->input);
}
goto bad_mem;
chainmode = strsep(&tmp, "-");
++<<<<<<< HEAD
+ *ivopts = strsep(&tmp, "-");
+ *ivmode = strsep(&*ivopts, ":");
+
+ if (*ivmode && (!strcmp(*ivmode, "disk") || !strcmp(*ivmode, "fmp")))
+ set_bit(CRYPT_MODE_DISKCIPHER, &cc->cipher_flags);
+
+ if (tmp)
+ DMWARN("Ignoring unexpected additional cipher options");
++=======
+ *ivmode = strsep(&tmp, ":");
+ *ivopts = tmp;
++>>>>>>> android-4.14-p
/*
* For compatibility with the original dm-crypt mapping format, if
*/
limits->max_segment_size = PAGE_SIZE;
++<<<<<<< HEAD
+ if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+ limits->logical_block_size = cc->sector_size;
+ limits->physical_block_size = cc->sector_size;
+ blk_limits_io_min(limits, cc->sector_size);
+ }
+
+ if (crypt_mode_diskcipher(cc))
+ limits->logical_block_size = PAGE_SIZE;
++=======
+ limits->logical_block_size =
+ max_t(unsigned short, limits->logical_block_size, cc->sector_size);
+ limits->physical_block_size =
+ max_t(unsigned, limits->physical_block_size, cc->sector_size);
+ limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
++>>>>>>> android-4.14-p
}
static struct target_type crypt_target = {
state != VB2_BUF_STATE_REQUEUEING) {
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
++<<<<<<< HEAD
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv,
+ vb->planes[plane].bytesused, memflags);
++=======
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
++>>>>>>> android-4.14-p
}
spin_lock_irqsave(&q->done_lock, flags);
return ret;
}
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
+static void __qbuf_work(struct work_struct *work)
{
struct vb2_buffer *vb;
+ struct vb2_queue *q;
+
+ vb = container_of(work, struct vb2_buffer, qbuf_work);
+ q = vb->vb2_queue;
+
+ if (q->start_streaming_called)
+ __enqueue_in_driver(vb);
+}
+
+static void vb2_qbuf_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct vb2_buffer *vb = container_of(cb, struct vb2_buffer, fence_cb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vb->fence_cb_lock, flags);
+ del_timer(&vb->fence_timer);
+ if (!vb->in_fence) {
+ spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
+ return;
+ }
+ /*
+ * If the fence signals with an error we mark the buffer as such
+ * and avoid using it by setting it to VB2_BUF_STATE_ERROR and
+ * not queueing it to the driver. However, we can't notify the error
+ * to userspace right now because, by the time this callback runs, QBUF
+ * has already returned.
+ * So we delay that to DQBUF time. See comments in vb2_buffer_done()
+ * as well.
+ */
+ if (vb->in_fence->error)
+ vb->state = VB2_BUF_STATE_ERROR;
+
+ dma_fence_put(vb->in_fence);
+ vb->in_fence = NULL;
+
+ if (vb->state == VB2_BUF_STATE_ERROR) {
+ spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
+
+ schedule_work(&vb->qbuf_work);
+}
+
+#define VB2_FENCE_TIMEOUT (1000)
+static void vb2_fence_timeout_handler(unsigned long arg)
+{
+ struct vb2_buffer *vb = (struct vb2_buffer *)arg;
+ struct dma_fence *fence;
+ unsigned long flags;
+ char name[32];
+
+ pr_err("%s: fence callback is not called during %d ms\n",
+ __func__, VB2_FENCE_TIMEOUT);
+ spin_lock_irqsave(&vb->fence_cb_lock, flags);
+ if (!vb->in_fence) {
+ spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
+ return;
+ }
+
+ fence = vb->in_fence;
+ if (fence) {
+ strlcpy(name, fence->ops->get_driver_name(fence),
+ sizeof(name));
+ pr_err("%s: vb2 in-fence: %s #%d (%s), error: %d\n",
+ __func__, name, fence->seqno,
+ dma_fence_is_signaled(fence) ?
+ "signaled" : "active", fence->error);
+
+ dma_fence_remove_callback(vb->in_fence, &vb->fence_cb);
+ dma_fence_put(fence);
+ vb->in_fence = NULL;
+ vb->state = VB2_BUF_STATE_ERROR;
+ }
+
+ fence = vb->out_fence;
+ if (fence)
+ pr_err("%s: vb2 out-fence: #%d\n", __func__, fence->seqno);
+
+ spin_unlock_irqrestore(&vb->fence_cb_lock, flags);
+
+ schedule_work(&vb->qbuf_work);
+}
+
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct dma_fence *in_fence)
+{
+ struct vb2_buffer *vb;
+ unsigned long flags;
int ret;
+ if (q->error) {
+ dprintk(1, "fatal error occurred on queue\n");
+ return -EIO;
+ }
+
vb = q->bufs[index];
switch (vb->state) {
return;
check_once = true;
++<<<<<<< HEAD
+ /* WARN_ON(1); */
++=======
++>>>>>>> android-4.14-p
pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
if (vb->vb2_queue->allow_zero_bytesused)
u32 clk_en_a;
u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
++<<<<<<< HEAD
++=======
+ /* We must continue to set bit 28 in CMD until the change is complete */
+ if (host->state == STATE_WAITING_CMD11_DONE)
+ sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
+
+ slot->mmc->actual_clock = 0;
+
++>>>>>>> android-4.14-p
if (!clock) {
mci_writel(host, CLKENA, 0);
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
++<<<<<<< HEAD
+ /*
+ * Determine the device's preferred I/O size for reads and writes
+ * unless the reported value is unreasonably small, large, or
+ * garbage.
+ */
+ if (sdkp->opt_xfer_blocks &&
+ sdkp->opt_xfer_blocks <= dev_max &&
+ sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
+ sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
+ rw_max = q->limits.io_opt =
+ sdkp->opt_xfer_blocks * sdp->sector_size;
+ else
++=======
+ if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
+ q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+ } else
++>>>>>>> android-4.14-p
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
if (list_empty(head))
goto out;
++<<<<<<< HEAD
+ ufshcd_vops_pre_setup_clocks(hba, on);
++=======
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * before disabling the clocks managed here.
+ */
+ if (!on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+ }
++>>>>>>> android-4.14-p
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
}
}
++<<<<<<< HEAD
+ ret = ufshcd_vops_setup_clocks(hba, on);
++=======
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * after enabling the clocks managed here.
+ */
+ if (on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+ }
++>>>>>>> android-4.14-p
out:
if (ret) {
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
int ret;
- if (sci->cfg_gpio)
- sci->cfg_gpio();
+ if (!pm_runtime_status_suspended(dev))
+ s3c64xx_spi_runtime_resume(dev);
- ret = pm_runtime_force_resume(dev);
- if (ret < 0)
- return ret;
+ if (sci->domain == DOMAIN_TOP) {
+
++<<<<<<< HEAD
+ /* Enable the clock */
+#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
+ exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
+#endif
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
+
+ if (sci->cfg_gpio)
+ sci->cfg_gpio();
+ if (sci->secure_mode)
+ sci->need_hw_init = 1;
+ else {
+ exynos_usi_init(sdd);
+ s3c64xx_spi_hwinit(sdd, sdd->port_id);
+ }
++=======
+ return spi_master_resume(master);
+ }
+ #endif /* CONFIG_PM_SLEEP */
++>>>>>>> android-4.14-p
#ifdef CONFIG_PM
-static int s3c64xx_spi_runtime_suspend(struct device *dev)
+ /* Disable the clock */
+ clk_disable_unprepare(sdd->src_clk);
+ clk_disable_unprepare(sdd->clk);
+#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
+ exynos_update_ip_idle_status(sdd->idle_ip_index, 1);
+#endif
+#endif
+ }
+
+ /* Start the queue running */
+ ret = spi_master_resume(master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
+ else
+ dev_dbg(dev, "resumed\n");
+
+ return ret;
+}
+
+static int s3c64xx_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- int ret;
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
- if (sdd->port_conf->clk_ioclk) {
- ret = clk_prepare_enable(sdd->ioclk);
- if (ret != 0)
- return ret;
- }
+ if (sci->dma_mode == DMA_MODE)
+ return 0;
- ret = clk_prepare_enable(sdd->src_clk);
- if (ret != 0)
- goto err_disable_ioclk;
+ dev_dbg(dev, "spi suspend is handled in suspend_noirq, dma mode = %d\n",
+ sci->dma_mode);
+ return s3c64xx_spi_suspend_operation(dev);
+}
- ret = clk_prepare_enable(sdd->clk);
- if (ret != 0)
- goto err_disable_src_clk;
+static int s3c64xx_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
++<<<<<<< HEAD
+ if (sci->dma_mode != DMA_MODE)
+ return 0;
++=======
+ s3c64xx_spi_hwinit(sdd, sdd->port_id);
+
+ return 0;
++>>>>>>> android-4.14-p
-err_disable_src_clk:
- clk_disable_unprepare(sdd->src_clk);
-err_disable_ioclk:
- clk_disable_unprepare(sdd->ioclk);
+ dev_dbg(dev, "spi resume is handled in device resume, dma mode = %d\n",
+ sci->dma_mode);
- return ret;
+ return s3c64xx_spi_resume_operation(dev);
}
-#endif /* CONFIG_PM */
+
+static int s3c64xx_spi_resume_noirq(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
+
+ if (sci->dma_mode == DMA_MODE)
+ return 0;
+
+ dev_dbg(dev, "spi resume is handled in resume_noirq, dma mode = %d\n",
+ sci->dma_mode);
+
+ return s3c64xx_spi_resume_operation(dev);
+}
+#else
+static int s3c64xx_spi_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int s3c64xx_spi_resume(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops s3c64xx_spi_pm = {
SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
++<<<<<<< HEAD
+ free_duped_table(attachment->priv);
++=======
+ struct ion_dma_buf_attachment *a = attachment->priv;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+ free_duped_table(a->table);
+
+ kfree(a);
++>>>>>>> android-4.14-p
}
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
bool cached)
{
int i;
++<<<<<<< HEAD
+ gfp_t gfp_flags = high_order_gfp_flags;
++=======
++>>>>>>> android-4.14-p
for (i = 0; i < NUM_ORDERS; i++) {
struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
- if (orders[i] > 4)
- gfp_flags = high_order_gfp_flags;
+ if (orders[i] < 4)
+ gfp_flags = low_order_gfp_flags;
pool = ion_page_pool_create(gfp_flags, orders[i], cached);
if (!pool)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tz = data->tzd;
- const struct thermal_trip * const trips =
- of_thermal_get_trip_points(tz);
- int ret = 0, threshold_code, i;
- unsigned long reference, temp;
- unsigned int status;
-
- if (!trips) {
- pr_err("%s: Cannot get trip points from of-thermal.c!\n",
- __func__);
- ret = -ENODEV;
- goto out;
- }
-
- status = readb(data->base + EXYNOS_TMU_REG_STATUS);
- if (!status) {
- ret = -EBUSY;
- goto out;
- }
-
- sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
-
- /* Write temperature code for threshold */
- reference = trips[0].temperature / MCELSIUS;
- threshold_code = temp_to_code(data, reference);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ unsigned int trim_info, temp_error1, temp_error2;
+ unsigned short cal_type;
+ unsigned int rising_threshold, falling_threshold;
+ unsigned int reg_off, bit_off;
+ enum thermal_trip_type type;
+ int temp, temp_hist, threshold_code;
+ int i, sensor, count = 0, interrupt_count;
- for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
- temp = trips[i].temperature / MCELSIUS;
- writeb(temp - reference, data->base +
- EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
- }
+ trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO(0));
+ cal_type = (trim_info >> EXYNOS_TMU_CALIB_SEL_SHIFT) & EXYNOS_TMU_CALIB_SEL_MASK;
- data->tmu_clear_irqs(data);
-out:
- return ret;
-}
+ for (sensor = 0; sensor < TOTAL_SENSORS; sensor++) {
-static int exynos4412_tmu_initialize(struct platform_device *pdev)
-{
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- const struct thermal_trip * const trips =
- of_thermal_get_trip_points(data->tzd);
- unsigned int status, trim_info, con, ctrl, rising_threshold;
- int ret = 0, threshold_code, i;
- unsigned long crit_temp = 0;
-
- status = readb(data->base + EXYNOS_TMU_REG_STATUS);
- if (!status) {
- ret = -EBUSY;
- goto out;
- }
+ if (!(data->sensors & (1 << sensor)))
+ continue;
- if (data->soc == SOC_ARCH_EXYNOS3250 ||
- data->soc == SOC_ARCH_EXYNOS4412 ||
- data->soc == SOC_ARCH_EXYNOS5250) {
- if (data->soc == SOC_ARCH_EXYNOS3250) {
- ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
- ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
- writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
- }
- ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
- ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
- writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
- }
+ /* Read the sensor error value from TRIMINFOX */
+ trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO(sensor));
+ temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
+ temp_error2 = (trim_info >> EXYNOS_TMU_TRIMINFO_85_P0_SHIFT) & EXYNOS_TMU_TEMP_MASK;
- /* On exynos5420 the triminfo register is in the shared space */
- if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
- trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
- else
- trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
+ /* Save sensor id */
+ data->sensor_info[count].sensor_num = sensor;
+ dev_info(&pdev->dev, "Sensor number = %d\n", sensor);
- sanitize_temp_error(data, trim_info);
+ /* Check thermal calibration type */
+ data->sensor_info[count].cal_type = cal_type;
- /* Write temperature code for rising and falling threshold */
- rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
- rising_threshold = get_th_reg(data, rising_threshold, false);
- writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
- writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);
+ /* Check temp_error1 value */
+ data->sensor_info[count].temp_error1 = temp_error1;
+ if (!data->sensor_info[count].temp_error1)
+ data->sensor_info[count].temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
- data->tmu_clear_irqs(data);
+ /* Check temp_error2 if calibration type is TYPE_TWO_POINT_TRIMMING */
+ if (data->sensor_info[count].cal_type == TYPE_TWO_POINT_TRIMMING) {
+ data->sensor_info[count].temp_error2 = temp_error2;
- /* if last threshold limit is also present */
- for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
- if (trips[i].type == THERMAL_TRIP_CRITICAL) {
- crit_temp = trips[i].temperature;
- break;
+ if (!data->sensor_info[count].temp_error2)
+ data->sensor_info[count].temp_error2 =
+ (pdata->efuse_value >> EXYNOS_TMU_TRIMINFO_85_P0_SHIFT) &
+ EXYNOS_TMU_TEMP_MASK;
}
- }
- if (i == of_thermal_get_ntrips(data->tzd)) {
- pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
- __func__);
- ret = -EINVAL;
- goto out;
+ interrupt_count = 0;
+ /* Write temperature code for rising and falling threshold */
+ for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
+ tz->ops->get_trip_type(tz, i, &type);
+
+ if (type == THERMAL_TRIP_PASSIVE)
+ continue;
+
+ reg_off = (interrupt_count / 2) * 4;
+ bit_off = ((interrupt_count + 1) % 2) * 16;
+
+ if (sensor == 0)
+ reg_off += EXYNOS_TMU_REG_THD_TEMP0;
+ else if (sensor < 8)
+ reg_off += EXYNOS_TMU_REG_THD_TEMP1 + (sensor - 1) * 0x20;
+ else
+ reg_off += EXYNOS_TMU_REG_THD_TEMP8 + (sensor - 8) * 0x20;
+
+ tz->ops->get_trip_temp(tz, i, &temp);
+ temp /= MCELSIUS;
+
+ tz->ops->get_trip_hyst(tz, i, &temp_hist);
+ temp_hist = temp - (temp_hist / MCELSIUS);
+
+ /* Set 9-bit temperature code for rising threshold levels */
+ threshold_code = temp_to_code_with_sensorinfo(data, temp, &data->sensor_info[count]);
+ rising_threshold = readl(data->base + reg_off);
+ rising_threshold &= ~(EXYNOS_TMU_TEMP_MASK << bit_off);
+ rising_threshold |= threshold_code << bit_off;
+ writel(rising_threshold, data->base + reg_off);
+
+ /* Set 9-bit temperature code for falling threshold levels */
+ threshold_code = temp_to_code_with_sensorinfo(data, temp_hist, &data->sensor_info[count]);
+ falling_threshold = readl(data->base + reg_off + 0x10);
+ falling_threshold &= ~(EXYNOS_TMU_TEMP_MASK << bit_off);
+ falling_threshold |= threshold_code << bit_off;
+ writel(falling_threshold, data->base + reg_off + 0x10);
+ interrupt_count++;
+ }
+ count++;
}
- threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
- /* 1-4 level to be assigned in th0 reg */
- rising_threshold &= ~(0xff << 8 * i);
- rising_threshold |= threshold_code << 8 * i;
- writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
- con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
- con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
- writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+ data->tmu_clear_irqs(data);
-out:
- return ret;
+ return 0;
}
-static int exynos5433_tmu_initialize(struct platform_device *pdev)
+static int exynos9610_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct exynos_tmu_platform_data *pdata = data->pdata;
struct thermal_zone_device *tz = data->tzd;
- unsigned int status, trim_info;
- unsigned int rising_threshold = 0, falling_threshold = 0;
- int temp, temp_hist;
- int ret = 0, threshold_code, i, sensor_id, cal_type;
-
- status = readb(data->base + EXYNOS_TMU_REG_STATUS);
- if (!status) {
- ret = -EBUSY;
- goto out;
- }
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ unsigned int trim_info, temp_error1, temp_error2;
+ unsigned short cal_type;
+ unsigned int rising_threshold, falling_threshold;
+ unsigned int reg_off, bit_off;
+ enum thermal_trip_type type;
+ int temp, temp_hist, threshold_code;
+ int i, sensor, count = 0, interrupt_count;
- trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
- sanitize_temp_error(data, trim_info);
+ trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO(0));
+ cal_type = (trim_info >> EXYNOS_TMU_CALIB_SEL_SHIFT) & EXYNOS_TMU_CALIB_SEL_MASK;
- /* Read the temperature sensor id */
- sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
- >> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
- dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);
+ for (sensor = 0; sensor < TOTAL_SENSORS; sensor++) {
- /* Read the calibration mode */
- writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
- cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
- >> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;
+ if (!(data->sensors & (1 << sensor)))
+ continue;
- switch (cal_type) {
- case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
- pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
- break;
- case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
- pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
- break;
- default:
- pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
- break;
- }
+ /* Read the sensor error value from TRIMINFOX */
+ trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO(sensor));
+ temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
+ temp_error2 = (trim_info >> EXYNOS_TMU_TRIMINFO_85_P0_SHIFT) & EXYNOS_TMU_TEMP_MASK;
- dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
- cal_type ? 2 : 1);
-
- /* Write temperature code for rising and falling threshold */
- for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
- int rising_reg_offset, falling_reg_offset;
- int j = 0;
-
- switch (i) {
- case 0:
- case 1:
- case 2:
- case 3:
- rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
- falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
- j = i;
- break;
- case 4:
- case 5:
- case 6:
- case 7:
- rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
- falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
- j = i - 4;
- break;
- default:
- continue;
- }
+ /* Save sensor id */
+ data->sensor_info[count].sensor_num = sensor;
+ dev_info(&pdev->dev, "Sensor number = %d\n", sensor);
- /* Write temperature code for rising threshold */
- tz->ops->get_trip_temp(tz, i, &temp);
- temp /= MCELSIUS;
- threshold_code = temp_to_code(data, temp);
+ /* Check thermal calibration type */
+ data->sensor_info[count].cal_type = cal_type;
+
+ /* Check temp_error1 value */
+ data->sensor_info[count].temp_error1 = temp_error1;
+ if (!data->sensor_info[count].temp_error1)
+ data->sensor_info[count].temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
++<<<<<<< HEAD
+ /* Check temp_error2 if calibration type is TYPE_TWO_POINT_TRIMMING */
+ if (data->sensor_info[count].cal_type == TYPE_TWO_POINT_TRIMMING) {
+ data->sensor_info[count].temp_error2 = temp_error2;
++=======
+ rising_threshold = readl(data->base + rising_reg_offset);
+ rising_threshold &= ~(0xff << j * 8);
+ rising_threshold |= (threshold_code << j * 8);
+ writel(rising_threshold, data->base + rising_reg_offset);
++>>>>>>> android-4.14-p
- /* Write temperature code for falling threshold */
- tz->ops->get_trip_hyst(tz, i, &temp_hist);
- temp_hist = temp - (temp_hist / MCELSIUS);
- threshold_code = temp_to_code(data, temp_hist);
+ if (!data->sensor_info[count].temp_error2)
+ data->sensor_info[count].temp_error2 =
+ (pdata->efuse_value >> EXYNOS_TMU_TRIMINFO_85_P0_SHIFT) &
+ EXYNOS_TMU_TEMP_MASK;
+ }
- falling_threshold = readl(data->base + falling_reg_offset);
- falling_threshold &= ~(0xff << j * 8);
- falling_threshold |= (threshold_code << j * 8);
- writel(falling_threshold, data->base + falling_reg_offset);
+ interrupt_count = 0;
+ /* Write temperature code for rising and falling threshold */
+ for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
+ tz->ops->get_trip_type(tz, i, &type);
+
+ if (type == THERMAL_TRIP_PASSIVE)
+ continue;
+
+ reg_off = (interrupt_count / 2) * 4;
+ bit_off = ((interrupt_count + 1) % 2) * 16;
+
+ if (sensor == 0)
+ reg_off += EXYNOS_TMU_REG_THD_TEMP0;
+ else if (sensor < 8)
+ reg_off += EXYNOS_TMU_REG_THD_TEMP1 + (sensor - 1) * 0x20;
+ else
+ reg_off += EXYNOS_TMU_REG_THD_TEMP8 + (sensor - 8) * 0x20;
+
+ tz->ops->get_trip_temp(tz, i, &temp);
+ temp /= MCELSIUS;
+
+ tz->ops->get_trip_hyst(tz, i, &temp_hist);
+ temp_hist = temp - (temp_hist / MCELSIUS);
+
+ /* Set 9-bit temperature code for rising threshold levels */
+ threshold_code = temp_to_code_with_sensorinfo(data, temp, &data->sensor_info[count]);
+ rising_threshold = readl(data->base + reg_off);
+ rising_threshold &= ~(EXYNOS_TMU_TEMP_MASK << bit_off);
+ rising_threshold |= threshold_code << bit_off;
+ writel(rising_threshold, data->base + reg_off);
+
+ /* Set 9-bit temperature code for falling threshold levels */
+ threshold_code = temp_to_code_with_sensorinfo(data, temp_hist, &data->sensor_info[count]);
+ falling_threshold = readl(data->base + reg_off + 0x10);
+ falling_threshold &= ~(EXYNOS_TMU_TEMP_MASK << bit_off);
+ falling_threshold |= threshold_code << bit_off;
+ writel(falling_threshold, data->base + reg_off + 0x10);
+ interrupt_count++;
+ }
+ count++;
}
data->tmu_clear_irqs(data);
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
++<<<<<<< HEAD
+ if (ourport->info->has_divslot)
+ wr_regl(port, S3C2443_DIVSLOT, udivslot);
++=======
+ port->status &= ~UPSTAT_AUTOCTS;
++>>>>>>> android-4.14-p
umcon = rd_regl(port, S3C2410_UMCON);
if (termios->c_cflag & CRTSCTS) {
* initialized. The PHY interfaces and the PHYs get initialized together with
* the core in dwc3_core_init.
*/
-static int dwc3_phy_setup(struct dwc3 *dwc)
+int dwc3_phy_setup(struct dwc3 *dwc)
{
u32 reg;
- int ret;
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
dwc->maximum_speed = USB_SPEED_HIGH;
}
++<<<<<<< HEAD
+ ret = dwc3_core_get_phy(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "Can't get PHY structure!!!\n");
++=======
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
++>>>>>>> android-4.14-p
goto err0;
+ }
+
+ /* Adjust SOF accuracy only for revisions >= 2.50a */
+ if (dwc->revision < DWC3_REVISION_250A)
+ dwc->adj_sof_accuracy = 0;
++<<<<<<< HEAD
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "Can't core_soft_reset!!!(%d)\n", ret);
+ goto err0;
++=======
+ if (!dwc->ulpi_ready) {
+ ret = dwc3_core_ulpi_init(dwc);
+ if (ret)
+ goto err0;
+ dwc->ulpi_ready = true;
++>>>>>>> android-4.14-p
}
- ret = dwc3_phy_setup(dwc);
+ if (!dwc->phys_ready) {
+ ret = dwc3_core_get_phy(dwc);
+ if (ret)
+ goto err0a;
+ dwc->phys_ready = true;
+ }
+
+ ret = dwc3_core_soft_reset(dwc);
if (ret)
- goto err0;
+ goto err0a;
+ if (dotg) {
+ phy_tune(dwc->usb2_generic_phy, dotg->otg.state);
+ phy_tune(dwc->usb3_generic_phy, dotg->otg.state);
+ } else {
+ phy_tune(dwc->usb2_generic_phy, 0);
+ phy_tune(dwc->usb3_generic_phy, 0);
+ }
+
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
++<<<<<<< HEAD
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
+ dwc->link_state = DWC3_LINK_STATE_SS_DIS;
++=======
+ err0a:
+ dwc3_ulpi_exit(dwc);
+
++>>>>>>> android-4.14-p
err0:
return ret;
}
struct dwc3 *dwc = dep->dwc;
req->started = false;
- list_del(&req->list);
+ /* Only delete from the list if the item isn't poisoned. */
+ if (req->list.next != LIST_POISON1)
+ list_del(&req->list);
req->remaining = 0;
+ req->unaligned = false;
+ req->zero = false;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
else
dep->flags |= DWC3_EP_STALL;
} else {
++<<<<<<< HEAD
++=======
+
++>>>>>>> android-4.14-p
ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret)
dev_err(dwc->dev, "failed to clear STALL on %s\n",
goto err4;
}
++<<<<<<< HEAD
+ if (dwc->dotg) {
+ ret = otg_set_peripheral(&dwc->dotg->otg, &dwc->gadget);
+ if (ret) {
+ dev_err(dwc->dev, "failed to set otg peripheral\n");
+ goto err4;
+ }
+ }
++=======
+ dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
++>>>>>>> android-4.14-p
return 0;
__le32 __iomem **port_array;
struct xhci_bus_state *bus_state;
unsigned long flags;
++<<<<<<< HEAD
+ int is_port_connect = 0;
+ int ret;
++=======
+ u32 portsc_buf[USB_MAXCHILDREN];
+ bool wake_enabled;
++>>>>>>> android-4.14-p
max_ports = xhci_get_ports(hcd, &port_array);
bus_state = &xhci->bus_state[hcd_index(hcd)];
t1 = readl(port_array[port_index]);
t2 = xhci_port_state_to_neutral(t1);
+ portsc_buf[port_index] = 0;
++<<<<<<< HEAD
+ if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ port_index + 1);
+ if (slot_id) {
++=======
+ /* Bail out if a USB3 port has a new device in link training */
+ if ((hcd->speed >= HCD_USB3) &&
+ (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+ bus_state->bus_suspended = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
+ return -EBUSY;
+ }
+
+ /* suspend ports in U0, or bail out for new connect changes */
+ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+ if ((t1 & PORT_CSC) && wake_enabled) {
+ bus_state->bus_suspended = 0;
++>>>>>>> android-4.14-p
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_stop_device(xhci, slot_id, 1);
- spin_lock_irqsave(&xhci->lock, flags);
+ xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
+ return -EBUSY;
}
+ xhci_dbg(xhci, "port %d not suspended\n", port_index);
t2 &= ~PORT_PLS_MASK;
t2 |= PORT_LINK_STROBE | XDEV_U3;
set_bit(port_index, &bus_state->bus_suspended);
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
-
- if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
- (hcd->speed < HCD_USB3)) {
- if (usb_amd_pt_check_port(hcd->self.controller,
- port_index))
- t2 &= ~PORT_WAKE_BITS;
- }
- } else
+ } else {
t2 &= ~PORT_WAKE_BITS;
+ }
t1 = xhci_port_state_to_neutral(t1);
++<<<<<<< HEAD
+ if (t1 != t2) {
+ writel(t2, port_array[port_index]);
+ }
+ }
+
+ if (is_port_connect && usb_hcd_is_primary_hcd(hcd)) {
+ xhci_info(xhci, "port is connected, phy vendor set\n");
+ ret = phy_vendor_set(xhci->main_hcd->phy, 1, 0);
+ if (ret) {
+ xhci_info(xhci, "phy vendor set fail\n");
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+ }
++=======
+ if (t1 != t2)
+ portsc_buf[port_index] = t2;
+ }
+
+ /* write port settings, stopping and suspending ports if needed */
+ port_index = max_ports;
+ while (port_index--) {
+ if (!portsc_buf[port_index])
+ continue;
+ if (test_bit(port_index, &bus_state->bus_suspended)) {
+ int slot_id;
+
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ port_index + 1);
+ if (slot_id) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_stop_device(xhci, slot_id, 1);
+ spin_lock_irqsave(&xhci->lock, flags);
+ }
+ }
+ writel(portsc_buf[port_index], port_array[port_index]);
++>>>>>>> android-4.14-p
}
+
+ xhci_info(xhci, "%s 'HC_STATE_SUSPENDED' portcon: %d primary_hcd: %d\n",
+ __func__, is_port_connect, usb_hcd_is_primary_hcd(hcd));
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
+
spin_unlock_irqrestore(&xhci->lock, flags);
+
return 0;
}
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
++<<<<<<< HEAD
+ int timeout = 0;
+
+ dev_info(&dev->dev, "XHCI PLAT REMOVE\n");
+
+ usb3_portsc = NULL;
+ pp_set_delayed = 0;
+
+ /*
+ * A deadlock has sometimes been observed in this function, so wait
+ * below for the pending hub_event processing to complete first.
+ */
+ while (xhci->shared_hcd->is_in_hub_event || hcd->is_in_hub_event) {
+ msleep(10);
+ timeout += 10;
+ if (timeout >= XHCI_HUB_EVENT_TIMEOUT) {
+ xhci_err(xhci,
+ "ERROR: hub_event completion timeout\n");
+ break;
+ }
+ }
+ xhci_dbg(xhci, "%s: waited %dmsec", __func__, timeout);
++=======
+ struct usb_hcd *shared_hcd = xhci->shared_hcd;
++>>>>>>> android-4.14-p
xhci->xhc_state |= XHCI_STATE_REMOVING;
+ xhci->xhci_alloc->offset = 0;
+
+ dev_info(&dev->dev, "WAKE UNLOCK\n");
+ wake_unlock(xhci->wakelock);
+ wake_lock_destroy(xhci->wakelock);
- usb_remove_hcd(xhci->shared_hcd);
+ usb_remove_hcd(shared_hcd);
+ xhci->shared_hcd = NULL;
usb_phy_shutdown(hcd->usb_phy);
+ /*
+ * In usb_remove_hcd, phy_exit is called if phy is not NULL.
+ * However, when the PHY is turned on or off through runtime PM,
+ * the PHY should not be exited at this time. So, to prevent the PHY
+ * exit, the PHY pointer has to be set to NULL.
+ */
+ if (parent && hcd->phy)
+ hcd->phy = NULL;
+
usb_remove_hcd(hcd);
- usb_put_hcd(xhci->shared_hcd);
+ usb_put_hcd(shared_hcd);
if (!IS_ERR(clk))
clk_disable_unprepare(clk);
* commands, reset device commands, disable slot commands, and address device
* commands.
*/
- #define XHCI_EP_LIMIT_QUIRK (1 << 5)
- #define XHCI_BROKEN_MSI (1 << 6)
- #define XHCI_RESET_ON_RESUME (1 << 7)
- #define XHCI_SW_BW_CHECKING (1 << 8)
- #define XHCI_AMD_0x96_HOST (1 << 9)
- #define XHCI_TRUST_TX_LENGTH (1 << 10)
- #define XHCI_LPM_SUPPORT (1 << 11)
- #define XHCI_INTEL_HOST (1 << 12)
- #define XHCI_SPURIOUS_REBOOT (1 << 13)
- #define XHCI_COMP_MODE_QUIRK (1 << 14)
- #define XHCI_AVOID_BEI (1 << 15)
- #define XHCI_PLAT (1 << 16)
- #define XHCI_SLOW_SUSPEND (1 << 17)
- #define XHCI_SPURIOUS_WAKEUP (1 << 18)
+ #define XHCI_EP_LIMIT_QUIRK BIT_ULL(5)
+ #define XHCI_BROKEN_MSI BIT_ULL(6)
+ #define XHCI_RESET_ON_RESUME BIT_ULL(7)
+ #define XHCI_SW_BW_CHECKING BIT_ULL(8)
+ #define XHCI_AMD_0x96_HOST BIT_ULL(9)
+ #define XHCI_TRUST_TX_LENGTH BIT_ULL(10)
+ #define XHCI_LPM_SUPPORT BIT_ULL(11)
+ #define XHCI_INTEL_HOST BIT_ULL(12)
+ #define XHCI_SPURIOUS_REBOOT BIT_ULL(13)
+ #define XHCI_COMP_MODE_QUIRK BIT_ULL(14)
+ #define XHCI_AVOID_BEI BIT_ULL(15)
+ #define XHCI_PLAT BIT_ULL(16)
+ #define XHCI_SLOW_SUSPEND BIT_ULL(17)
+ #define XHCI_SPURIOUS_WAKEUP BIT_ULL(18)
/* For controllers with a broken beyond repair streams implementation */
- #define XHCI_BROKEN_STREAMS (1 << 19)
- #define XHCI_PME_STUCK_QUIRK (1 << 20)
- #define XHCI_MTK_HOST (1 << 21)
- #define XHCI_SSIC_PORT_UNUSED (1 << 22)
- #define XHCI_NO_64BIT_SUPPORT (1 << 23)
- #define XHCI_MISSING_CAS (1 << 24)
+ #define XHCI_BROKEN_STREAMS BIT_ULL(19)
+ #define XHCI_PME_STUCK_QUIRK BIT_ULL(20)
+ #define XHCI_MTK_HOST BIT_ULL(21)
+ #define XHCI_SSIC_PORT_UNUSED BIT_ULL(22)
+ #define XHCI_NO_64BIT_SUPPORT BIT_ULL(23)
+ #define XHCI_MISSING_CAS BIT_ULL(24)
/* For controller with a broken Port Disable implementation */
++<<<<<<< HEAD
+#define XHCI_BROKEN_PORT_PED (1 << 25)
+#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
+#define XHCI_U2_DISABLE_WAKE (1 << 27)
+#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
+#define XHCI_L2_SUPPORT (1 << 29)
+#define XHCI_SUSPEND_DELAY (1 << 30)
++=======
+ #define XHCI_BROKEN_PORT_PED BIT_ULL(25)
+ #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 BIT_ULL(26)
+ #define XHCI_U2_DISABLE_WAKE BIT_ULL(27)
+ #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL BIT_ULL(28)
+ #define XHCI_HW_LPM_DISABLE BIT_ULL(29)
+ #define XHCI_SUSPEND_DELAY BIT_ULL(30)
+ #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
+ #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
+ #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
++>>>>>>> android-4.14-p
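One note on the android-4.14-p side of the hunk above: it switches the quirk flags from plain (1 << n) constants to BIT_ULL(), which becomes necessary once quirk bits such as XHCI_RESET_PLL_ON_DISCONNECT (bit 34) and XHCI_SNPS_BROKEN_SUSPEND (bit 35) move past bit 31, since shifting a 32-bit int by 32 or more is undefined in C. The sketch below is illustrative only; MY_BIT_ULL is a hypothetical stand-in for the kernel's BIT_ULL(), and the 64-bit quirks variable is assumed here, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's BIT_ULL(): keep the shift 64-bit. */
#define MY_BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	uint64_t quirks = 0;

	/*
	 * (1 << 34) would shift a 32-bit int past its width (undefined
	 * behaviour); the ULL form stays well defined for bits 32..63.
	 */
	quirks |= MY_BIT_ULL(34);
	quirks |= MY_BIT_ULL(35);

	printf("quirks = %#llx\n", (unsigned long long)quirks);
	return (quirks & MY_BIT_ULL(34)) ? 0 : 1;
}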
unsigned int num_active_eps;
unsigned int limit_active_eps;
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
+ struct inode *inode = fio->page->mapping->host;
- verify_block_addr(fio, fio->new_blkaddr);
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+ __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+ return -EFAULT;
+
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
bio_put(bio);
return -EFAULT;
}
+ fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
bio_set_op_attrs(bio, fio->op, fio->op_flags);
++<<<<<<< HEAD
+ if (f2fs_may_encrypt_bio(inode, fio))
+ fscrypt_set_bio(inode, bio, PG_DUN(inode, fio->page));
+
+ __submit_bio(fio->sbi, bio, fio->type);
+
++=======
++>>>>>>> android-4.14-p
if (!is_read_io(fio->op))
inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
+
+ __submit_bio(fio->sbi, bio, fio->type);
return 0;
}
return 0;
}
++<<<<<<< HEAD
++=======
+ /*
+ * Iterates over all slaves, and slaves of slaves.
+ */
+ static struct mount *next_descendent(struct mount *root, struct mount *cur)
+ {
+ if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
+ return first_slave(cur);
+ do {
+ struct mount *master = cur->mnt_master;
+
+ if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
+ struct mount *next = next_slave(cur);
+
+ return (next == root) ? NULL : next;
+ }
+ cur = master;
+ } while (cur != root);
+ return NULL;
+ }
+
++>>>>>>> android-4.14-p
void propagate_remount(struct mount *mnt)
{
- struct mount *m = mnt;
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *p = mnt, *m;
struct super_block *sb = mnt->mnt.mnt_sb;
- if (sb->s_op->copy_mnt_data) {
- m = next_descendent(mnt, m);
- while (m) {
+ if (!sb->s_op->copy_mnt_data)
+ return;
+ for (p = propagation_next(parent, parent); p;
+ p = propagation_next(p, parent)) {
+ m = __lookup_mnt(&p->mnt, mnt->mnt_mountpoint);
+ if (m)
sb->s_op->copy_mnt_data(m->mnt.data, mnt->mnt.data);
- m = next_descendent(mnt, m);
- }
}
}
#define __nocfi __attribute__((no_sanitize("cfi")))
#endif
++<<<<<<< HEAD
+/* all clang versions usable with the kernel support KASAN ABI version 5 */
+#define KASAN_ABI_VERSION 5
+
+/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+#if __has_feature(address_sanitizer)
+#define __SANITIZE_ADDRESS__
++=======
+ /*
+ * Not all versions of clang implement the type-generic versions
+ * of the builtin overflow checkers. Fortunately, clang implements
+ * __has_builtin allowing us to avoid awkward version
+ * checks. Unfortunately, we don't know which version of gcc clang
+ * pretends to be, so the macro may or may not be defined.
+ */
+ #undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+ #if __has_builtin(__builtin_mul_overflow) && \
+ __has_builtin(__builtin_add_overflow) && \
+ __has_builtin(__builtin_sub_overflow)
+ #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
++>>>>>>> android-4.14-p
#endif
static struct freq_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)
++<<<<<<< HEAD
+struct global_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *a, struct kobj_attribute *b,
+ const char *c, size_t count);
+};
+
++=======
++>>>>>>> android-4.14-p
#define define_one_global_ro(_name) \
- static struct global_attr _name = \
+ static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define define_one_global_rw(_name) \
unsigned long load_avg;
unsigned long util_avg;
struct util_est util_est;
++<<<<<<< HEAD
+};
+
+struct ontime_avg {
+ u64 ontime_migration_time;
+ u64 load_sum;
+ u32 period_contrib;
+ unsigned long load_avg;
+};
+
+struct ontime_entity {
+ struct ontime_avg avg;
+ int migrating;
+ int cpu;
++=======
++>>>>>>> android-4.14-p
};
struct sched_statistics {
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
++<<<<<<< HEAD
+#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
+
++=======
+ #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
+ #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
++>>>>>>> android-4.14-p
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++<<<<<<< HEAD
+TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
+TASK_PFA_SET(LMK_WAITING, lmk_waiting)
++=======
+ TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+ TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+ TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+ TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+ TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
++>>>>>>> android-4.14-p
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
++<<<<<<< HEAD
+# include <linux/static_key.h>
+
++=======
++>>>>>>> android-4.14-p
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
extern __read_mostly unsigned int sysctl_sched_time_avg;
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
++<<<<<<< HEAD
+extern ktime_t tick_nohz_get_sleep_length(void);
+extern ktime_t tick_nohz_get_sleep_length_cpu(int cpu);
++=======
+ extern bool tick_nohz_idle_got_tick(void);
+ extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
++>>>>>>> android-4.14-p
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
+ static inline void tick_nohz_idle_stop_tick(void) { }
+ static inline void tick_nohz_idle_retain_tick(void) { }
+ static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
+ static inline bool tick_nohz_idle_got_tick(void) { return false; }
- static inline ktime_t tick_nohz_get_sleep_length(void)
+ static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
- return NSEC_PER_SEC / HZ;
+ *delta_next = TICK_NSEC;
+ return *delta_next;
}
+static inline ktime_t tick_nohz_get_sleep_length_cpu(int cpu)
+{
+ return NSEC_PER_SEC / HZ;
+}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
+
+ static inline void tick_nohz_idle_stop_tick_protected(void) { }
#endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
}
#endif /* CREATE_TRACE_POINTS */
+ #ifdef CONFIG_SCHED_WALT
+ extern unsigned int sysctl_sched_use_walt_cpu_util;
+ extern unsigned int sysctl_sched_use_walt_task_util;
+ extern unsigned int walt_ravg_window;
+ extern bool walt_disabled;
+
+ #define walt_util(util_var, demand_sum) {\
+ u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
+ do_div(sum, walt_ravg_window);\
+ util_var = (typeof(util_var))sum;\
+ }
+ #endif
+
+/*
+ * Tracepoint for logging FRT schedule activity
+ */
+
+TRACE_EVENT(sched_fluid_activated_cpus,
+
+ TP_PROTO(int cpu, int util_sum, int busy_thr, unsigned int prefer_mask),
+
+ TP_ARGS(cpu, util_sum, busy_thr, prefer_mask),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( int, util_sum )
+ __field( int, busy_thr )
+ __field( unsigned long, prefer_mask )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->util_sum = util_sum;
+ __entry->busy_thr = busy_thr;
+ __entry->prefer_mask = prefer_mask;
+ ),
+
+ TP_printk("cpu=%d util_sum=%d busy_thr=%d prefer_mask=%x",
+ __entry->cpu, __entry->util_sum,
+ __entry->busy_thr, __entry->prefer_mask)
+);
+
+TRACE_EVENT(sched_fluid_stat,
+
+ TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, int best, char *str),
+
+ TP_ARGS(tsk, avg, best, str),
+
+ TP_STRUCT__entry(
+ __array( char, selectby, TASK_COMM_LEN )
+ __array( char, targettsk, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, bestcpu )
+ __field( int, prevcpu )
+ __field( unsigned long, load_avg )
+ __field( unsigned long, util_avg )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->selectby, str, TASK_COMM_LEN);
+ memcpy(__entry->targettsk, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->bestcpu = best;
+ __entry->prevcpu = task_cpu(tsk);
+ __entry->load_avg = avg->load_avg;
+ __entry->util_avg = avg->util_avg;
+ ),
+ TP_printk("frt: comm=%s pid=%d assigned to #%d from #%d load_avg=%lu util_avg=%lu "
+ "by %s.",
+ __entry->targettsk,
+ __entry->pid,
+ __entry->bestcpu,
+ __entry->prevcpu,
+ __entry->load_avg,
+ __entry->util_avg,
+ __entry->selectby)
+);
+/*
+ * Tracepoint for accounting sched averages for tasks.
+ */
+TRACE_EVENT(sched_rt_load_avg_task,
+
+ TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+ TP_ARGS(tsk, avg),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( unsigned long, load_avg )
+ __field( unsigned long, util_avg )
+ __field( u64, load_sum )
+ __field( u32, util_sum )
+ __field( u32, period_contrib )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = task_cpu(tsk);
+ __entry->load_avg = avg->load_avg;
+ __entry->util_avg = avg->util_avg;
+ __entry->load_sum = avg->load_sum;
+ __entry->util_sum = avg->util_sum;
+ __entry->period_contrib = avg->period_contrib;
+ ),
+ TP_printk("rt: comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
+ "load_sum=%llu util_sum=%u period_contrib=%u",
+ __entry->comm,
+ __entry->pid,
+ __entry->cpu,
+ __entry->load_avg,
+ __entry->util_avg,
+ (u64)__entry->load_sum,
+ (u32)__entry->util_sum,
+ (u32)__entry->period_contrib)
+);
+
+
/*
* Tracepoint for cfs_rq load tracking:
*/
),
TP_printk("pid=%d comm=%s "
- "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
+ "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d timeout=%llu",
__entry->pid, __entry->comm,
__entry->cpu, __entry->tasks, __entry->idx,
- __entry->boost, __entry->max_boost)
+ __entry->boost, __entry->max_boost,
+ __entry->group_ts)
);
+/*
+ * Tracepoint for schedtune_grouputil_update
+ */
+TRACE_EVENT(sched_tune_grouputil_update,
+
+ TP_PROTO(int idx, int total, int accumulated, unsigned long group_util,
+ struct task_struct *heaviest_p, unsigned long biggest_util),
+
+ TP_ARGS(idx, total, accumulated, group_util, heaviest_p, biggest_util),
+
+ TP_STRUCT__entry(
+ __field( int, idx )
+ __field( int, total )
+ __field( int, accumulated )
+ __field( unsigned long, group_util )
+ __field( pid_t, pid )
+ __array( char, comm, TASK_COMM_LEN )
+ __field( unsigned long, biggest_util )
+ ),
+
+ TP_fast_assign(
+ __entry->idx = idx;
+ __entry->total = total;
+ __entry->accumulated = accumulated;
+ __entry->group_util = group_util;
+ __entry->pid = heaviest_p->pid;
+ memcpy(__entry->comm, heaviest_p->comm, TASK_COMM_LEN);
+ __entry->biggest_util = biggest_util;
+ ),
+
+ TP_printk("idx=%d total=%d accumulated=%d group_util=%lu "
+ "heaviest task(pid=%d comm=%s util=%lu)",
+ __entry->idx, __entry->total, __entry->accumulated, __entry->group_util,
+ __entry->pid, __entry->comm, __entry->biggest_util)
+);
+
+/*
+ * Tracepoint for checking group balancing
+ */
+TRACE_EVENT(sched_tune_check_group_balance,
+
+ TP_PROTO(int idx, int ib_count, bool balancing),
+
+ TP_ARGS(idx, ib_count, balancing),
+
+ TP_STRUCT__entry(
+ __field( int, idx )
+ __field( int, ib_count )
+ __field( bool, balancing )
+ ),
+
+ TP_fast_assign(
+ __entry->idx = idx;
+ __entry->ib_count = ib_count;
+ __entry->balancing = balancing;
+ ),
+
+ TP_printk("idx=%d imbalance_count=%d balancing=%d",
+ __entry->idx, __entry->ib_count, __entry->balancing)
+);
+
/*
* Tracepoint for schedtune_boostgroup_update
*/
choice
prompt "Utilization's PELT half-Life"
++<<<<<<< HEAD
+ default PELT_UTIL_HALFLIFE_16
++=======
+ default PELT_UTIL_HALFLIFE_32
++>>>>>>> android-4.14-p
help
Allows choosing one of the possible values for the PELT half-life to
be used for the update of the utilization of tasks and CPUs.
build up to 50% utilization. The higher the half-life, the longer it
takes for a task to be represented as a big one (a worked ramp-up
example follows this choice block).
++<<<<<<< HEAD
+ If not sure, use the default of 16 ms.
+
+config PELT_UTIL_HALFLIFE_32
+ bool "32 ms, for server"
+
+config PELT_UTIL_HALFLIFE_16
+ bool "16 ms, suggested for interactive workloads"
+
+config PELT_UTIL_HALFLIFE_8
+ bool "8 ms, very fast"
++=======
+ If not sure, use the default of 32 ms.
+
+ config PELT_UTIL_HALFLIFE_32
+ bool "32 ms, default for server"
+
+ config PELT_UTIL_HALFLIFE_16
+ bool "16 ms, suggested for interactive workloads"
+ help
+ Use 16ms as PELT half-life value. This will increase the ramp-up and
+ decay of utilization and load twice as fast as for the default
+ configuration using 32ms.
+
+ config PELT_UTIL_HALFLIFE_8
+ bool "8 ms, very fast"
+ help
+ Use 8ms as PELT half-life value. This will increase the ramp-up and
+ decay of utilization and load four times as fast as for the default
+ configuration using 32ms.
++>>>>>>> android-4.14-p
endchoice
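To make the half-life trade-off described in the help text above concrete, here is a rough userspace sketch (not kernel code) of the usual PELT approximation util(t) ~= SCHED_CAPACITY_SCALE * (1 - 2^(-t / half_life)) for a task that runs continuously. The kernel uses a fixed-point geometric series, so the printed numbers are only approximations, and the 1024 scale is an assumption based on SCHED_CAPACITY_SHIFT used elsewhere in this patch (link with -lm):

#include <math.h>
#include <stdio.h>

#define SCALE	1024.0	/* assumed SCHED_CAPACITY_SCALE */

int main(void)
{
	const int halflife_ms[] = { 8, 16, 32 };
	int i, t;

	for (i = 0; i < 3; i++) {
		int h = halflife_ms[i];

		/*
		 * After each half-life the remaining gap to full utilization
		 * halves: ~50% after one, ~75% after two, ~87.5% after three.
		 */
		for (t = h; t <= 3 * h; t += h)
			printf("half-life %2d ms: after %3d ms running, util ~ %4.0f\n",
			       h, t, SCALE * (1.0 - pow(2.0, -(double)t / h)));
	}
	return 0;
}

With the 16 ms value chosen on the HEAD side, a continuously running task is reported at roughly half capacity after about 16 ms, versus about 32 ms with the android-4.14-p default.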
void cpus_write_unlock(void)
{
- percpu_up_write(&cpu_hotplug_lock);
-}
-
-void lockdep_assert_cpus_held(void)
-{
++<<<<<<< HEAD
+ cpu_hotplug.active_writer = NULL;
+ mutex_unlock(&cpu_hotplug.lock);
+ cpuhp_lock_release();
++=======
+ /*
+ * We can't have hotplug operations before userspace starts running,
+ * and some init codepaths will knowingly not take the hotplug lock.
+ * This is all valid, so mute lockdep until it makes sense to report
+ * unheld locks.
+ */
+ if (system_state < SYSTEM_RUNNING)
+ return;
+
+ percpu_rwsem_assert_held(&cpu_hotplug_lock);
++>>>>>>> android-4.14-p
}
/*
*/
smp_mb();
++<<<<<<< HEAD
+ if (WARN_ON_ONCE(!st->should_run))
+ return;
+
+ lock_map_acquire(&cpuhp_state_lock_map);
++=======
+ cpuhp_lock_acquire(bringup);
++>>>>>>> android-4.14-p
if (st->single) {
state = st->cb_state;
char *text = textbuf;
size_t text_len;
enum log_flags lflags = 0;
++<<<<<<< HEAD
+ unsigned long flags;
+ int this_cpu;
+ int printed_len;
+ bool in_sched = false;
+
+ if (level == LOGLEVEL_SCHED) {
+ level = LOGLEVEL_DEFAULT;
+ in_sched = true;
+ }
+
+ boot_delay_msec(level);
+ printk_delay();
+
+ /* This stops the holder of console_sem just where we want him */
+ logbuf_lock_irqsave(flags);
+ this_cpu = smp_processor_id();
++=======
+
++>>>>>>> android-4.14-p
/*
* The printf needs to come first; we need the syslog
* prefix which might be passed-in as a parameter.
#include <trace/events/power.h>
#include "sched.h"
+#include "tune.h"
- unsigned long boosted_cpu_util(int cpu);
+ unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
-#define SUGOV_KTHREAD_PRIORITY 50
-
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int up_rate_limit_us;
policy->governor_data = NULL;
sugov_tunables_free(tunables);
++<<<<<<< HEAD
+free_sg_policy:
++=======
+ stop_kthread:
+ sugov_kthread_stop(sg_policy);
++>>>>>>> android-4.14-p
mutex_unlock(&global_tunables_lock);
+ free_sg_policy:
sugov_policy_free(sg_policy);
disable_fast_switch:
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
- long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+ long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+ long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
+ if (sched_feat(EXYNOS_MS)) {
+ exynos_init_entity_util_avg(se);
+ goto util_init_done;
+ }
+
if (cap > 0) {
if (cfs_rq->avg.util_avg != 0) {
sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (___update_load_avg(now, cpu, &se->avg,
++<<<<<<< HEAD
+ se->on_rq * scale_load_down(se->load.weight),
+ cfs_rq->curr == se, NULL, NULL)) {
+ if (schedtune_util_est_en(task_of(se)))
+ cfs_se_util_change(&se->avg);
++=======
+ se->on_rq * scale_load_down(se->load.weight),
+ cfs_rq->curr == se, NULL, NULL)) {
+ cfs_se_util_change(&se->avg);
+
+ #ifdef UTIL_EST_DEBUG
+ /*
+ * Trace utilization only for actual tasks.
+ *
+ * These trace events are mostly useful to get easier to
+ * read plots for the estimated utilization, where we can
+ * compare it with the actual grow/decrease of the original
+ * PELT signal.
+ * Let's keep them disabled by default in "production kernels".
+ */
+ if (entity_is_task(se)) {
+ struct task_struct *tsk = task_of(se);
+
+ trace_sched_util_est_task(tsk, &se->avg);
+
+ /* Trace utilization only for top level CFS RQ */
+ cfs_rq = &(task_rq(tsk)->cfs);
+ trace_sched_util_est_cpu(cpu, cfs_rq);
+ }
+ #endif /* UTIL_EST_DEBUG */
++>>>>>>> android-4.14-p
return 1;
}
static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
++<<<<<<< HEAD
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+ return (p->ravg.demand / (walt_ravg_window >> SCHED_CAPACITY_SHIFT));
+ }
++=======
+ static inline int task_fits_capacity(struct task_struct *p, long capacity);
+
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ {
+ if (!static_branch_unlikely(&sched_asym_cpucapacity))
+ return;
+
+ if (!p) {
+ rq->misfit_task_load = 0;
+ return;
+ }
+
+ if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
+ rq->misfit_task_load = 0;
+ return;
+ }
+
+ rq->misfit_task_load = task_h_load(p);
+ }
+
+ static inline unsigned long task_util(struct task_struct *p)
+ {
+ #ifdef CONFIG_SCHED_WALT
+ if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
+ return (p->ravg.demand /
+ (walt_ravg_window >> SCHED_CAPACITY_SHIFT));
++>>>>>>> android-4.14-p
#endif
return READ_ONCE(p->se.avg.util_avg);
}
++<<<<<<< HEAD
+inline unsigned long _task_util_est(struct task_struct *p)
+{
+ struct util_est ue = READ_ONCE(p->se.avg.util_est);
+
+ return schedtune_util_est_en(p) ? max(ue.ewma, ue.enqueued)
+ : task_util(p);
+}
+
+inline unsigned long task_util_est(struct task_struct *p)
+{
+ return schedtune_util_est_en(p) ? max(READ_ONCE(p->se.avg.util_avg), _task_util_est(p))
+ : task_util(p);
++=======
+ static inline unsigned long _task_util_est(struct task_struct *p)
+ {
+ struct util_est ue = READ_ONCE(p->se.avg.util_est);
+
+ return max(ue.ewma, ue.enqueued);
+ }
+
+ static inline unsigned long task_util_est(struct task_struct *p)
+ {
+ #ifdef CONFIG_SCHED_WALT
+ if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
+ return (p->ravg.demand /
+ (walt_ravg_window >> SCHED_CAPACITY_SHIFT));
+ #endif
+ return max(task_util(p), _task_util_est(p));
++>>>>>>> android-4.14-p
}
static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
++<<<<<<< HEAD
+ /* Update plots for Task and CPU estimated utilization */
++=======
++>>>>>>> android-4.14-p
trace_sched_util_est_task(p, &p->se.avg);
trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
}
/*
* Check if a (signed) value is within a specified (unsigned) margin,
* based on the observation that:
++<<<<<<< HEAD
++=======
+ *
++>>>>>>> android-4.14-p
* abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
*
* NOTE: this only works when value + margin < INT_MAX.
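The identity above can be checked in isolation. Below is a minimal userspace sketch (not part of the kernel sources; the margin value is chosen only for illustration):

/* Standalone sanity check of the within_margin() identity; not kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Same expression the kernel helper uses: abs(value) < margin,
 * evaluated with a single unsigned comparison. */
static bool within_margin(int value, int margin)
{
	return (unsigned int)(value + margin - 1) < (unsigned int)(2 * margin - 1);
}

int main(void)
{
	const int margin = 10;	/* e.g. roughly 1% of SCHED_CAPACITY_SCALE */
	int v;

	for (v = -15; v <= 15; v++)
		if (within_margin(v, margin) != (abs(v) < margin)) {
			printf("mismatch at %d\n", v);
			return 1;
		}

	printf("identity holds for all tested values\n");
	return 0;
}

Values below -margin wrap around to very large unsigned numbers, which is why the single comparison covers both bounds at once.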
}
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
++<<<<<<< HEAD
+ /* Update plots for CPU's estimated utilization */
++=======
++>>>>>>> android-4.14-p
trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
/*
if (!task_sleep)
return;
++<<<<<<< HEAD
+ if (!schedtune_util_est_en(p))
+ return;
+
++=======
++>>>>>>> android-4.14-p
/*
* If the PELT values haven't changed since enqueue time,
* skip the util_est update.
*/
ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
last_ewma_diff = ue.enqueued - ue.ewma;
++<<<<<<< HEAD
+ if (within_margin(last_ewma_diff, capacity_orig_of(task_cpu(p)) / 100))
++=======
+ if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
++>>>>>>> android-4.14-p
return;
/*
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
WRITE_ONCE(p->se.avg.util_est, ue);
++<<<<<<< HEAD
+ /* Update plots for Task's estimated utilization */
++=======
++>>>>>>> android-4.14-p
trace_sched_util_est_task(p, &p->se.avg);
}
return 0;
}
++<<<<<<< HEAD
++=======
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
++>>>>>>> android-4.14-p
static inline void
util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
* Hence - be careful when enabling DEBUG_EENV_DECISIONS
* especially if WALT is the task signal.
*/
-/*#define DEBUG_EENV_DECISIONS*/
-
-#ifdef DEBUG_EENV_DECISIONS
-/* max of 8 levels of sched groups traversed */
-#define EAS_EENV_DEBUG_LEVELS 16
-
-struct _eenv_debug {
- unsigned long cap;
- unsigned long norm_util;
- unsigned long cap_energy;
- unsigned long idle_energy;
- unsigned long this_energy;
- unsigned long this_busy_energy;
- unsigned long this_idle_energy;
- cpumask_t group_cpumask;
- unsigned long cpu_util[1];
-};
-#endif
-
-struct eenv_cpu {
- /* CPU ID, must be in cpus_mask */
- int cpu_id;
-
- /*
- * Index (into sched_group_energy::cap_states) of the OPP the
- * CPU needs to run at if the task is placed on it.
- * This includes the both active and blocked load, due to
- * other tasks on this CPU, as well as the task's own
- * utilization.
- */
- int cap_idx;
- int cap;
-
- /* Estimated system energy */
- unsigned long energy;
-
- /* Estimated energy variation wrt EAS_CPU_PRV */
- long nrg_delta;
-
-#ifdef DEBUG_EENV_DECISIONS
- struct _eenv_debug *debug;
- int debug_idx;
-#endif /* DEBUG_EENV_DECISIONS */
-};
-
-struct energy_env {
- /* Utilization to move */
- struct task_struct *p;
- unsigned long util_delta;
- unsigned long util_delta_boosted;
-
- /* Mask of CPUs candidates to evaluate */
- cpumask_t cpus_mask;
-
- /* CPU candidates to evaluate */
- struct eenv_cpu *cpu;
- int eenv_cpu_count;
-
-#ifdef DEBUG_EENV_DECISIONS
- /* pointer to the memory block reserved
- * for debug on this CPU - there will be
- * sizeof(struct _eenv_debug) *
- * (EAS_CPU_CNT * EAS_EENV_DEBUG_LEVELS)
- * bytes allocated here.
- */
- struct _eenv_debug *debug;
-#endif
- /*
- * Index (into energy_env::cpu) of the morst energy efficient CPU for
- * the specified energy_env::task
- */
- int next_idx;
- int max_cpu_count;
-
- /* Support data */
- struct sched_group *sg_top;
- struct sched_group *sg_cap;
- struct sched_group *sg;
-};
- static int cpu_util_wake(int cpu, struct task_struct *p);
+ /**
+ * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
+ * @cpu: the CPU to get the utilization of
+ *
+ * The unit of the return value must be the one of capacity so we can compare
+ * the utilization with the capacity of the CPU that is available for CFS
+ * tasks (i.e. cpu_capacity).
+ *
+ * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
+ * recent utilization of currently non-runnable tasks on a CPU. It represents
+ * the amount of utilization of a CPU in the range [0..capacity_orig] where
+ * capacity_orig is the cpu_capacity available at the highest frequency,
+ * i.e. arch_scale_cpu_capacity().
+ * The utilization of a CPU converges towards a sum equal to or less than the
+ * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
+ * the running time on this CPU scaled by capacity_curr.
+ *
+ * The estimated utilization of a CPU is defined to be the maximum between its
+ * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
+ * currently RUNNABLE on that CPU.
+ * This allows us to properly represent the expected utilization of a CPU that
+ * has just had a big task start running after a long sleep period. At the same time
+ * however it preserves the benefits of the "blocked utilization" in
+ * describing the potential for other tasks waking up on the same CPU.
+ *
+ * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
+ * higher than capacity_orig because of unfortunate rounding in
+ * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
+ * the average stabilizes with the new running time. We need to check that the
+ * utilization stays within the range of [0..capacity_orig] and cap it if
+ * necessary. Without utilization capping, a group could be seen as overloaded
+ * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
+ * available capacity. We allow utilization to overshoot capacity_curr (but not
+ * capacity_orig) as it is useful for predicting the capacity required after task
+ * migrations (scheduler-driven DVFS).
+ *
+ * Return: the (estimated) utilization for the specified CPU
+ */
+ static inline unsigned long cpu_util(int cpu)
+ {
+ struct cfs_rq *cfs_rq;
+ unsigned int util;
+
+ #ifdef CONFIG_SCHED_WALT
+ if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util)) {
+ u64 walt_cpu_util = cpu_rq(cpu)->cumulative_runnable_avg;
+
+ walt_cpu_util <<= SCHED_CAPACITY_SHIFT;
+ do_div(walt_cpu_util, walt_ravg_window);
+
+ return min_t(unsigned long, walt_cpu_util,
+ capacity_orig_of(cpu));
+ }
+ #endif
+
+ cfs_rq = &cpu_rq(cpu)->cfs;
+ util = READ_ONCE(cfs_rq->avg.util_avg);
+
+ if (sched_feat(UTIL_EST))
+ util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+
+ return min_t(unsigned long, util, capacity_orig_of(cpu));
+ }
+
+ static inline unsigned long cpu_util_freq(int cpu)
+ {
+ #ifdef CONFIG_SCHED_WALT
+ u64 walt_cpu_util;
+
+ if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util))
+ return cpu_util(cpu);
+
+ walt_cpu_util = cpu_rq(cpu)->prev_runnable_sum;
+ walt_cpu_util <<= SCHED_CAPACITY_SHIFT;
+ do_div(walt_cpu_util, walt_ravg_window);
+
+ return min_t(unsigned long, walt_cpu_util, capacity_orig_of(cpu));
+ #else
+ return cpu_util(cpu);
+ #endif
+ }
+
+ /*
+ * cpu_util_wake: Compute CPU utilization with any contributions from
+ * the waking task p removed.
+ */
+ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
+ {
+ struct cfs_rq *cfs_rq;
+ unsigned int util;
+
+ #ifdef CONFIG_SCHED_WALT
+ /*
+ * WALT does not decay idle tasks in the same manner
+ * as PELT, so it makes little sense to subtract task
+ * utilization from cpu utilization. Instead just use
+ * cpu_util for this case.
+ */
+ if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util))
+ return cpu_util(cpu);
+ #endif
+
+ /* Task has no contribution or is new */
+ if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+ return cpu_util(cpu);
+
+ cfs_rq = &cpu_rq(cpu)->cfs;
+ util = READ_ONCE(cfs_rq->avg.util_avg);
+
+ /* Discount task's blocked util from CPU's util */
+ util -= min_t(unsigned int, util, task_util(p));
+
+ /*
+ * Covered cases:
+ *
+ * a) if *p is the only task sleeping on this CPU, then:
+ * cpu_util (== task_util) > util_est (== 0)
+ * and thus we return:
+ * cpu_util_wake = (cpu_util - task_util) = 0
+ *
+ * b) if other tasks are SLEEPING on this CPU, which is now exiting
+ * IDLE, then:
+ * cpu_util >= task_util
+ * cpu_util > util_est (== 0)
+ * and thus we discount *p's blocked utilization to return:
+ * cpu_util_wake = (cpu_util - task_util) >= 0
+ *
+ * c) if other tasks are RUNNABLE on that CPU and
+ * util_est > cpu_util
+ * then we use util_est since it returns a more restrictive
+ * estimation of the spare capacity on that CPU, by just
+ * considering the expected utilization of tasks already
+ * runnable on that CPU.
+ *
+ * Cases a) and b) are covered by the above code, while case c) is
+ * covered by the following code when estimated utilization is
+ * enabled.
+ */
+ if (sched_feat(UTIL_EST))
+ util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+
+ /*
+ * Utilization (estimated) can exceed the CPU capacity, thus let's
+ * clamp to the maximum CPU capacity to ensure consistency with
+ * the cpu_util call.
+ */
+ return min_t(unsigned long, util, capacity_orig_of(cpu));
+ }
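As a concrete illustration of case c) above, here is a small out-of-tree sketch with made-up numbers; it mirrors only the discount, max() and clamp steps, not the kernel data structures:

/* Illustrative numbers only; userspace sketch of case c) above. */
#include <stdio.h>

int main(void)
{
	unsigned int capacity_orig = 1024;	/* capacity_orig_of(cpu) */
	unsigned int cpu_util_avg  = 300;	/* cfs_rq->avg.util_avg, p included */
	unsigned int task_util_p   = 250;	/* task_util(p) */
	unsigned int util_est      = 400;	/* cfs_rq->avg.util_est.enqueued */
	unsigned int util;

	/* Discount the waking task's blocked contribution ... */
	util = cpu_util_avg - (task_util_p < cpu_util_avg ? task_util_p : cpu_util_avg);

	/* ... but runnable tasks keep util_est higher, so it wins (case c) */
	if (util_est > util)
		util = util_est;

	/* Clamp to capacity, as cpu_util() does */
	if (util > capacity_orig)
		util = capacity_orig;

	printf("cpu_util_wake ~= %u\n", util);	/* prints 400 */
	return 0;
}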
static unsigned long group_max_util(struct energy_env *eenv, int cpu_idx)
{
return util + margin;
}
-static inline unsigned long
+unsigned long
boosted_task_util(struct task_struct *task)
{
- unsigned long util = task_util(task);
+ unsigned long util = task_util_est(task);
long margin = schedtune_task_margin(task);
trace_sched_boost_task(task, util, margin);
return select_idle_sibling_cstate_aware(p, prev, target);
}
++<<<<<<< HEAD
+/*
+ * cpu_util_wake: Compute cpu utilization with any contributions from
+ * the waking task p removed.
+ */
+static int cpu_util_wake(int cpu, struct task_struct *p)
+{
+ struct cfs_rq *cfs_rq;
+ unsigned int util;
+
+#ifdef CONFIG_SCHED_WALT
+ /*
+ * WALT does not decay idle tasks in the same manner
+ * as PELT, so it makes little sense to subtract task
+ * utilization from cpu utilization. Instead just use
+ * cpu_util for this case.
+ */
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ return cpu_util(cpu);
+#endif
+ /* Task has no contribution or is new */
+ if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+ return cpu_util(cpu);
+
+ cfs_rq = &cpu_rq(cpu)->cfs;
+ util = READ_ONCE(cfs_rq->avg.util_avg);
+
+ /* Discount task's blocked util from CPU's util */
+ util -= min_t(unsigned int, util, task_util(p));
+
+ /*
+ * Covered cases:
+ *
+ * a) if *p is the only task sleeping on this CPU, then:
+ * cpu_util (== task_util) > util_est (== 0)
+ * and thus we return:
+ * cpu_util_wake = (cpu_util - task_util) = 0
+ *
+ * b) if other tasks are SLEEPING on this CPU, which is now exiting
+ * IDLE, then:
+ * cpu_util >= task_util
+ * cpu_util > util_est (== 0)
+ * and thus we discount *p's blocked utilization to return:
+ * cpu_util_wake = (cpu_util - task_util) >= 0
+ *
+ * c) if other tasks are RUNNABLE on that CPU and
+ * util_est > cpu_util
+ * then we use util_est since it returns a more restrictive
+ * estimation of the spare capacity on that CPU, by just
+ * considering the expected utilization of tasks already
+ * runnable on that CPU.
+ *
+ * Cases a) and b) are covered by the above code, while case c) is
+ * covered by the following code when estimated utilization is
+ * enabled.
+ */
+ if (sched_feat(UTIL_EST))
+ util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+
+ /*
+ * Utilization (estimated) can exceed the CPU capacity, thus let's
+ * clamp to the maximum CPU capacity to ensure consistency with
+ * the cpu_util call.
+ */
+ return min_t(unsigned long, util, capacity_orig_of(cpu));
+}
+
++=======
++>>>>>>> android-4.14-p
static inline int task_fits_capacity(struct task_struct *p, long capacity)
{
return capacity * 1024 > boosted_task_util(p) * capacity_margin;
return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
}
-static inline int find_best_target(struct task_struct *p, int *backup_cpu,
+int find_best_target(struct task_struct *p, int *backup_cpu,
bool boosted, bool prefer_idle)
{
- unsigned long best_idle_min_cap_orig = ULONG_MAX;
unsigned long min_util = boosted_task_util(p);
unsigned long target_capacity = ULONG_MAX;
unsigned long min_wake_util = ULONG_MAX;
sgs->idle_cpus++;
if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
- !sgs->group_misfit_task && rq_has_misfit(rq))
- sgs->group_misfit_task = capacity_of(i);
+ sgs->group_misfit_task_load < rq->misfit_task_load) {
+ sgs->group_misfit_task_load = rq->misfit_task_load;
+ *overload = 1;
+ }
+
- if (cpu_overutilized(i)) {
- *overutilized = true;
+ if (sched_feat(EXYNOS_MS)) {
+ if (lbt_overutilized(i, env->sd->level)) {
+ *overutilized = true;
++<<<<<<< HEAD
+ if (rq_has_misfit(rq))
+ *misfit_task = true;
+ }
+ } else {
+ if (cpu_overutilized(i)) {
+ *overutilized = true;
+
+ if (rq_has_misfit(rq))
+ *misfit_task = true;
+ }
++=======
+ if (rq->misfit_task_load)
+ *misfit_task = true;
++>>>>>>> android-4.14-p
}
}
if (static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
- update_misfit_task(rq, curr);
+ update_misfit_status(curr, rq);
update_overutilized_status(rq);
+
+#ifdef CONFIG_EXYNOS_PSTATE_MODE_CHANGER
+ exynos_emc_update(rq->cpu);
+#endif
}
/*
SCHED_FEAT(FIND_BEST_TARGET, true)
SCHED_FEAT(FBT_STRICT_ORDER, true)
++<<<<<<< HEAD
+SCHED_FEAT(EXYNOS_MS, true)
++=======
+ /*
+ * Apply schedtune boost hold to tasks of all sched classes.
+ * If enabled, schedtune will hold the boost applied to a CPU
+ * for 50ms regardless of task activation; if the task is
+ * still running 50ms later, the boost hold expires and the
+ * schedtune boost is removed as soon as the task stops.
+ * If disabled, this behaviour will only apply to tasks of the
+ * RT class.
+ */
+ SCHED_FEAT(SCHEDTUNE_BOOST_HOLD_ALL, false)
++>>>>>>> android-4.14-p
#include <linux/slab.h>
#include <linux/irq_work.h>
++<<<<<<< HEAD
+#include <linux/ems.h>
++=======
+ #include "tune.h"
++>>>>>>> android-4.14-p
#include "walt.h"
+#include <trace/events/sched.h>
+
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+struct frt_dom {
+ unsigned int coverage_ratio;
+ unsigned int coverage_thr;
+ unsigned int active_ratio;
+ unsigned int active_thr;
+ int coregroup;
+ struct cpumask cpus;
+
+	/* It is updated to reflect the system idle situation */
+ struct cpumask *activated_cpus;
+
+ struct list_head list;
+ struct frt_dom *next;
+ /* kobject for sysfs group */
+ struct kobject kobj;
+};
+struct cpumask activated_mask;
+
+LIST_HEAD(frt_list);
+DEFINE_RAW_SPINLOCK(frt_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct frt_dom *, frt_rqs);
+
+static struct kobject *frt_kobj;
+#define RATIO_SCALE_SHIFT 10
+#define cpu_util(rq) (rq->cfs.avg.util_avg + rq->rt.avg.util_avg)
+#define ratio_scale(v, r) (((v) * (r) * 10) >> RATIO_SCALE_SHIFT)
+
+static int frt_set_coverage_ratio(int cpu);
+static int frt_set_active_ratio(int cpu);
+struct frt_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *, char *);
+ ssize_t (*store)(struct kobject *, const char *, size_t count);
+};
+
+#define frt_attr_rw(_name) \
+static struct frt_attr _name##_attr = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define frt_show(_name) \
+static ssize_t show_##_name(struct kobject *k, char *buf) \
+{ \
+ struct frt_dom *dom = container_of(k, struct frt_dom, kobj); \
+ \
+ return sprintf(buf, "%u\n", (unsigned int)dom->_name); \
+}
+
+#define frt_store(_name, _type, _max) \
+static ssize_t store_##_name(struct kobject *k, const char *buf, size_t count) \
+{ \
+ unsigned int val; \
+ struct frt_dom *dom = container_of(k, struct frt_dom, kobj); \
+ \
+ if (!sscanf(buf, "%u", &val)) \
+ return -EINVAL; \
+ \
+ val = val > _max ? _max : val; \
+ dom->_name = (_type)val; \
+ frt_set_##_name(cpumask_first(&dom->cpus)); \
+ \
+ return count; \
+}
+
+static ssize_t show_coverage_ratio(struct kobject *k, char *buf)
+{
+ struct frt_dom *dom = container_of(k, struct frt_dom, kobj);
+
+ return sprintf(buf, "%u (%u)\n", dom->coverage_ratio, dom->coverage_thr);
+}
+
+static ssize_t show_active_ratio(struct kobject *k, char *buf)
+{
+ struct frt_dom *dom = container_of(k, struct frt_dom, kobj);
+
+ return sprintf(buf, "%u (%u)\n", dom->active_ratio, dom->active_thr);
+}
+
+frt_store(coverage_ratio, int, 100);
+frt_attr_rw(coverage_ratio);
+frt_store(active_ratio, int, 100);
+frt_attr_rw(active_ratio);
+
+static ssize_t show(struct kobject *kobj, struct attribute *at, char *buf)
+{
+ struct frt_attr *frtattr = container_of(at, struct frt_attr, attr);
+
+ return frtattr->show(kobj, buf);
+}
+
+static ssize_t store(struct kobject *kobj, struct attribute *at,
+ const char *buf, size_t count)
+{
+ struct frt_attr *frtattr = container_of(at, struct frt_attr, attr);
+
+ return frtattr->store(kobj, buf, count);
+}
+
+static const struct sysfs_ops frt_sysfs_ops = {
+ .show = show,
+ .store = store,
+};
+
+static struct attribute *frt_attrs[] = {
+ &coverage_ratio_attr.attr,
+ &active_ratio_attr.attr,
+ NULL
+};
+
+static struct kobj_type ktype_frt = {
+ .sysfs_ops = &frt_sysfs_ops,
+ .default_attrs = frt_attrs,
+};
+
+static int frt_find_prefer_cpu(struct task_struct *task)
+{
+ int cpu, allowed_cpu = 0;
+ unsigned int coverage_thr;
+ struct frt_dom *dom;
+
+ list_for_each_entry(dom, &frt_list, list) {
+ coverage_thr = per_cpu(frt_rqs, cpumask_first(&dom->cpus))->coverage_thr;
+ for_each_cpu_and(cpu, &task->cpus_allowed, &dom->cpus) {
+ allowed_cpu = cpu;
+ if (task->rt.avg.util_avg < coverage_thr)
+ return allowed_cpu;
+ }
+ }
+ return allowed_cpu;
+}
+
+static int frt_set_active_ratio(int cpu)
+{
+ unsigned long capacity;
+ struct frt_dom *dom = per_cpu(frt_rqs, cpu);
+
+ if (!dom || !cpu_active(cpu))
+ return -1;
+
+ capacity = get_cpu_max_capacity(cpu) *
+ cpumask_weight(cpu_coregroup_mask(cpu));
+ dom->active_thr = ratio_scale(capacity, dom->active_ratio);
+
+ return 0;
+}
+
+static int frt_set_coverage_ratio(int cpu)
+{
+ unsigned long capacity;
+ struct frt_dom *dom = per_cpu(frt_rqs, cpu);
+
+ if (!dom || !cpu_active(cpu))
+ return -1;
+
+ capacity = get_cpu_max_capacity(cpu);
+ dom->coverage_thr = ratio_scale(capacity, dom->coverage_ratio);
+
+ return 0;
+}
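To see what these ratios translate to, here is a standalone sketch of the ratio_scale() arithmetic used by both setters; the capacity value is an assumed example, not read from a real device tree:

/* Userspace sketch of the ratio_scale() macro defined earlier. */
#include <stdio.h>

#define RATIO_SCALE_SHIFT 10
#define ratio_scale(v, r) (((v) * (r) * 10) >> RATIO_SCALE_SHIFT)

int main(void)
{
	unsigned long capacity = 1024;	/* assume get_cpu_max_capacity() == 1024 */
	unsigned int ratio = 50;	/* coverage-ratio of 50 (percent) */

	/* 1024 * 50 * 10 >> 10 == 500: the effective divisor is 102.4,
	 * so the threshold lands slightly below a true 50% of capacity. */
	printf("coverage_thr = %lu\n", ratio_scale(capacity, ratio));
	return 0;
}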
+
+static const struct cpumask *get_activated_cpus(void)
+{
+ struct frt_dom *dom = per_cpu(frt_rqs, 0);
+ if (dom)
+ return dom->activated_cpus;
+ return cpu_active_mask;
+}
+
+static void update_activated_cpus(void)
+{
+ struct frt_dom *dom, *prev_idle_dom = NULL;
+ struct cpumask mask;
+ unsigned long flags;
+
+ if (!raw_spin_trylock_irqsave(&frt_lock, flags))
+ return;
+
+ cpumask_setall(&mask);
+ list_for_each_entry_reverse(dom, &frt_list, list) {
+ unsigned long dom_util_sum = 0;
+ unsigned long dom_active_thr = 0;
+ unsigned long capacity;
+ struct cpumask active_cpus;
+ int first_cpu, cpu;
+
+ cpumask_and(&active_cpus, &dom->cpus, cpu_active_mask);
+ first_cpu = cpumask_first(&active_cpus);
+		/* all CPUs in this domain are offline */
+ if (first_cpu == NR_CPUS)
+ continue;
+
+ for_each_cpu(cpu, &active_cpus) {
+ struct rq *rq = cpu_rq(cpu);
+ dom_util_sum += cpu_util(rq);
+ }
+
+ capacity = get_cpu_max_capacity(first_cpu) * cpumask_weight(&active_cpus);
+ dom_active_thr = ratio_scale(capacity, dom->active_ratio);
+
+ /* domain is idle */
+ if (dom_util_sum < dom_active_thr) {
+ /* if prev domain is also idle, clear prev domain cpus */
+ if (prev_idle_dom)
+ cpumask_andnot(&mask, &mask, &prev_idle_dom->cpus);
+ prev_idle_dom = dom;
+ }
+
+ trace_sched_fluid_activated_cpus(first_cpu, dom_util_sum,
+ dom_active_thr, *(unsigned int *)cpumask_bits(&mask));
+
+		/* this is the first domain; update activated_cpus */
+ if (first_cpu == 0)
+ cpumask_copy(dom->activated_cpus, &mask);
+ }
+ raw_spin_unlock_irqrestore(&frt_lock, flags);
+}
+
+
+static int __init frt_sysfs_init(void)
+{
+ struct frt_dom *dom;
+
+ if (list_empty(&frt_list))
+ return 0;
+
+ frt_kobj = kobject_create_and_add("frt", ems_kobj);
+ if (!frt_kobj)
+ goto out;
+
+ /* Add frt sysfs node for each coregroup */
+ list_for_each_entry(dom, &frt_list, list) {
+ int ret;
+
+ ret = kobject_init_and_add(&dom->kobj, &ktype_frt,
+ frt_kobj, "coregroup%d", dom->coregroup);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ pr_err("FRT(%s): failed to create sysfs node\n", __func__);
+ return -EINVAL;
+}
+
+static void frt_parse_dt(struct device_node *dn, struct frt_dom *dom, int cnt)
+{
+ struct device_node *frt, *coregroup;
+ char name[15];
+
+ frt = of_get_child_by_name(dn, "frt");
+ if (!frt)
+ goto disable;
+
+ snprintf(name, sizeof(name), "coregroup%d", cnt);
+ coregroup = of_get_child_by_name(frt, name);
+ if (!coregroup)
+ goto disable;
+ dom->coregroup = cnt;
+
+ of_property_read_u32(coregroup, "coverage-ratio", &dom->coverage_ratio);
+ if (!dom->coverage_ratio)
+ dom->coverage_ratio = 100;
+
+ of_property_read_u32(coregroup, "active-ratio", &dom->active_ratio);
+ if (!dom->active_ratio)
+ dom->active_thr = 0;
+
+ return;
+
+disable:
+ dom->coregroup = cnt;
+ dom->coverage_ratio = 100;
+ dom->active_thr = 0;
+ pr_err("FRT(%s): failed to parse frt node\n", __func__);
+}
+
+static int __init init_frt(void)
+{
+ struct frt_dom *dom, *prev = NULL, *head;
+ struct device_node *dn;
+ int cpu, tcpu, cnt = 0;
+
+ dn = of_find_node_by_path("/cpus/ems");
+ if (!dn)
+ return 0;
+
+ INIT_LIST_HEAD(&frt_list);
+ cpumask_setall(&activated_mask);
+
+ for_each_possible_cpu(cpu) {
+ if (cpu != cpumask_first(cpu_coregroup_mask(cpu)))
+ continue;
+
+ dom = kzalloc(sizeof(struct frt_dom), GFP_KERNEL);
+ if (!dom) {
+ pr_err("FRT(%s): failed to allocate dom\n");
+ goto put_node;
+ }
+
+ if (cpu == 0)
+ head = dom;
+
+ dom->activated_cpus = &activated_mask;
+
+ cpumask_copy(&dom->cpus, cpu_coregroup_mask(cpu));
+
+ frt_parse_dt(dn, dom, cnt++);
+
+ dom->next = head;
+ if (prev)
+ prev->next = dom;
+ prev = dom;
+
+ for_each_cpu(tcpu, &dom->cpus)
+ per_cpu(frt_rqs, tcpu) = dom;
+
+ frt_set_coverage_ratio(cpu);
+ frt_set_active_ratio(cpu);
+
+ list_add_tail(&dom->list, &frt_list);
+ }
+ frt_sysfs_init();
+
+put_node:
+ of_node_put(dn);
+
+ return 0;
+
+}
+late_initcall(init_frt);
+#else
+static inline void update_activated_cpus(void) { }
+#endif
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* SPDX-License-Identifier: GPL-2.0 */
/* Generated by Documentation/scheduler/sched-pelt; do not modify. */
++<<<<<<< HEAD
++=======
+
++>>>>>>> android-4.14-p
#ifdef CONFIG_PELT_UTIL_HALFLIFE_32
static const u32 runnable_avg_yN_inv[] = {
0xffffffff,0xfa83b2da,0xf5257d14,0xefe4b99a,
#define LOAD_AVG_MAX 47742
#define LOAD_AVG_MAX_N 345
++<<<<<<< HEAD
+static const u32 __accumulated_sum_N32[] = {
+ 0, 23371, 35056, 40899, 43820, 45281,
+ 46011, 46376, 46559, 46650, 46696, 46719,
+};
+
++=======
++>>>>>>> android-4.14-p
#endif
#ifdef CONFIG_PELT_UTIL_HALFLIFE_16
#define LOAD_AVG_MAX 24152
#define LOAD_AVG_MAX_N 517
++<<<<<<< HEAD
+static const u32 __accumulated_sum_N32[] = {
+ 0, 22731, 34096, 39779, 42620, 44041,
+ 44751, 45106, 45284, 45373, 45417, 45439,
+ 45450, 45456, 45459, 45460, 45461, 45461,
+ 45461, 45461, 45461, 45461, 45461, 45461,
+ 45461, 45461, 45461, 45461, 45461, 45461,
+ 45461, 45461, 45461, 45461,
+};
+
++=======
++>>>>>>> android-4.14-p
#endif
#ifdef CONFIG_PELT_UTIL_HALFLIFE_8
#define LOAD_AVG_MAX 12337
#define LOAD_AVG_MAX_N 603
++<<<<<<< HEAD
+static const u32 __accumulated_sum_N32[] = {
+ 0, 16507, 24760, 28887, 30950, 31982,
+ 32498, 32756, 32885, 32949, 32981, 32997,
+ 33005, 33009, 33011, 33012, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013, 33013,
+ 33013, 33013, 33013, 33013, 33013,
+};
+
+#endif
+
++=======
+ #endif
++>>>>>>> android-4.14-p
extern unsigned int walt_ravg_window;
extern bool walt_disabled;
++<<<<<<< HEAD
+#ifdef CONFIG_SCHED_WALT
+#define walt_util(util_var, demand_sum) {\
+ u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
+ do_div(sum, walt_ravg_window);\
+ util_var = (typeof(util_var))sum;\
+ }
+#endif
+
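As a quick sanity check of the walt_util() scaling, here is a userspace sketch with assumed window and demand values (not taken from a real trace):

/* Userspace sketch of the walt_util() arithmetic above. */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10

int main(void)
{
	uint64_t walt_ravg_window = 20000000ULL;	/* assume a 20ms window, in ns */
	uint64_t demand_sum       =  5000000ULL;	/* 5ms of demand in that window */

	/* Mirrors: sum = demand_sum << SCHED_CAPACITY_SHIFT; do_div(sum, window) */
	uint64_t util = (demand_sum << SCHED_CAPACITY_SHIFT) / walt_ravg_window;

	printf("util = %llu\n", (unsigned long long)util);	/* 5/20 of 1024 -> 256 */
	return 0;
}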
+/**
+ * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
+ * @cpu: the CPU to get the utilization of
+ *
+ * The unit of the return value must be the one of capacity so we can compare
+ * the utilization with the capacity of the CPU that is available for CFS
+ * tasks (i.e. cpu_capacity).
+ *
+ * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
+ * recent utilization of currently non-runnable tasks on a CPU. It represents
+ * the amount of utilization of a CPU in the range [0..capacity_orig] where
+ * capacity_orig is the cpu_capacity available at the highest frequency
+ * (arch_scale_cpu_capacity()).
+ * The utilization of a CPU converges towards a sum equal to or less than the
+ * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
+ * the running time on this CPU scaled by capacity_curr.
+ *
+ * The estimated utilization of a CPU is defined to be the maximum between its
+ * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
+ * currently RUNNABLE on that CPU.
+ * This allows us to properly represent the expected utilization of a CPU that
+ * has just had a big task start running after a long sleep period. At the same time
+ * however it preserves the benefits of the "blocked utilization" in
+ * describing the potential for other tasks waking up on the same CPU.
+ *
+ * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
+ * higher than capacity_orig because of unfortunate rounding in
+ * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
+ * the average stabilizes with the new running time. We need to check that the
+ * utilization stays within the range of [0..capacity_orig] and cap it if
+ * necessary. Without utilization capping, a group could be seen as overloaded
+ * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
+ * available capacity. We allow utilization to overshoot capacity_curr (but not
+ * capacity_orig) as it is useful for predicting the capacity required after task
+ * migrations (scheduler-driven DVFS).
+ *
+ * Return: the (estimated) utilization for the specified CPU
+ */
+static inline unsigned long __cpu_util(int cpu, int delta)
+{
+ struct cfs_rq *cfs_rq;
+ unsigned int util;
+
+ cfs_rq = &cpu_rq(cpu)->cfs;
+ util = READ_ONCE(cfs_rq->avg.util_avg);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+ walt_util(util, cpu_rq(cpu)->cumulative_runnable_avg);
+ }
+#endif
+ if (sched_feat(UTIL_EST))
+ util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+
+ delta += util;
+ if (delta < 0)
+ return 0;
+
+ return min_t(unsigned long, delta, capacity_orig_of(cpu));
+}
+
+static inline unsigned long cpu_util(int cpu)
+{
+ return __cpu_util(cpu, 0);
+}
+
+static inline unsigned long cpu_util_freq(int cpu)
+{
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+ unsigned long capacity = capacity_orig_of(cpu);
+ unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+ walt_util(util, cpu_rq(cpu)->prev_runnable_sum);
+ }
+#endif
+ if (sched_feat(UTIL_EST)) {
+ util = max_t(unsigned long, util,
+ READ_ONCE(cfs_rq->avg.util_est.enqueued));
+ }
+
+ return (util >= capacity) ? capacity : util;
+}
+
+unsigned long _task_util_est(struct task_struct *p);
+unsigned long task_util_est(struct task_struct *p);
+
+#endif
++=======
+ #endif /* CONFIG_SMP */
++>>>>>>> android-4.14-p
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
*
* Called from power state control code with interrupts disabled
*/
- ktime_t tick_nohz_get_sleep_length(void)
+ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
+ struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+ int cpu = smp_processor_id();
+ /*
+ * The idle entry time is expected to be a sufficient approximation of
+ * the current time at this point.
+ */
+ ktime_t now = ts->idle_entrytime;
+ ktime_t next_event;
- return ts->sleep_length;
+ WARN_ON_ONCE(!ts->inidle);
+
+ *delta_next = ktime_sub(dev->next_event, now);
+
+ if (!can_stop_idle_tick(cpu, ts))
+ return *delta_next;
+
+ next_event = tick_nohz_next_event(ts, cpu);
+ if (!next_event)
+ return *delta_next;
+
+ /*
+ * If the next highres timer to expire is earlier than next_event, the
+ * idle governor needs to know that.
+ */
+ next_event = min_t(u64, next_event,
+ hrtimer_next_event_without(&ts->sched_timer));
+
+ return ktime_sub(next_event, now);
}
+/**
+ * tick_nohz_get_sleep_length_cpu - return the length of the current sleep
+ * for a particular CPU.
+ *
+ * Called from power state control code with interrupts disabled
+ */
+ktime_t tick_nohz_get_sleep_length_cpu(int cpu)
+{
+ struct tick_sched *ts = tick_get_tick_sched(cpu);
+
+ return ts->sleep_length;
+}
+
/**
* tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
* for a particular CPU.