#define ARM_CPU_IMP_CAVIUM 0x43
#define ARM_CPU_IMP_BRCM 0x42
#define ARM_CPU_IMP_QCOM 0x51
++<<<<<<< HEAD
+#define ARM_CPU_IMP_SEC 0x53
++=======
+ #define ARM_CPU_IMP_NVIDIA 0x4E
++>>>>>>> 818299f6bdae
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
*/
static __must_check inline bool may_use_simd(void)
{
++<<<<<<< HEAD
+ return true;
++=======
+ /*
+ * kernel_neon_busy is only set while preemption is disabled,
+ * and is clear whenever preemption is enabled. Since
+ * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+ * cannot change under our feet -- if it's set we cannot be
+ * migrated, and if it's clear we cannot be migrated to a CPU
+ * where it is set.
+ */
+ return !in_irq() && !irqs_disabled() && !in_nmi() &&
+ !this_cpu_read(kernel_neon_busy);
++>>>>>>> 818299f6bdae
}
-#else /* ! CONFIG_KERNEL_MODE_NEON */
-
-static __must_check inline bool may_use_simd(void) {
- return false;
-}
-
-#endif /* ! CONFIG_KERNEL_MODE_NEON */
-
#endif
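/*
 * A minimal caller sketch (illustrative only, not taken from either side of
 * the merge): the comment above explains why may_use_simd() can be consulted
 * without extra locking, and callers are expected to bracket NEON use with
 * kernel_neon_begin()/kernel_neon_end() only when it returns true. The
 * function name and the scalar fallback below are hypothetical placeholders.
 */
static void example_neon_xor_block(u8 *dst, const u8 *src, unsigned int len)
{
	if (may_use_simd()) {
		kernel_neon_begin();
		/* NEON-accelerated implementation would run here */
		kernel_neon_end();
	} else {
		/* scalar fallback, usable from IRQ/atomic context */
	}
}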
for (j = 0; j < 8; j++) {
u32 data;
if (probe_kernel_address(p, data)) {
++<<<<<<< HEAD
+ if (j == 7)
+ pr_cont(" ********\n");
+ else
+ pr_cont(" ********");
+ } else {
+ if (j == 7)
+ pr_cont(" %08X\n", data);
+ else
+ pr_cont(" %08X", data);
+ }
+ ++p;
+ }
++=======
+ pr_cont(" ********");
+ } else {
+ pr_cont(" %08x", data);
+ }
+ ++p;
+ }
+ pr_cont("\n");
++>>>>>>> 818299f6bdae
}
}
#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
++<<<<<<< HEAD
+#include <linux/jiffies.h>
++=======
+ #include <linux/hashtable.h>
+ #include <linux/init.h>
+ #include <linux/jiffies.h>
+ #include <linux/proc_fs.h>
++>>>>>>> 818299f6bdae
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
++<<<<<<< HEAD
++static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
++=======
+ #define UID_HASH_BITS 10
+
+ static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
+
static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+ static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
+
+ struct uid_entry {
+ uid_t uid;
+ unsigned int max_state;
+ struct hlist_node hash;
+ struct rcu_head rcu;
+ u64 time_in_state[0];
+ };
++>>>>>>> 818299f6bdae
/**
* struct cpu_freqs - per-cpu frequency information
static unsigned int next_offset;
++<<<<<<< HEAD
+void cpufreq_task_times_init(struct task_struct *p)
+{
+ void *temp;
+ unsigned long flags;
+ unsigned int max_state;
++=======
+
+ /* Caller must hold rcu_read_lock() */
+ static struct uid_entry *find_uid_entry_rcu(uid_t uid)
+ {
+ struct uid_entry *uid_entry;
+
+ hash_for_each_possible_rcu(uid_hash_table, uid_entry, hash, uid) {
+ if (uid_entry->uid == uid)
+ return uid_entry;
+ }
+ return NULL;
+ }
+
+ /* Caller must hold uid lock */
+ static struct uid_entry *find_uid_entry_locked(uid_t uid)
+ {
+ struct uid_entry *uid_entry;
+
+ hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) {
+ if (uid_entry->uid == uid)
+ return uid_entry;
+ }
+ return NULL;
+ }
+
+ /* Caller must hold uid lock */
+ static struct uid_entry *find_or_register_uid_locked(uid_t uid)
+ {
+ struct uid_entry *uid_entry, *temp;
+ unsigned int max_state = READ_ONCE(next_offset);
+ size_t alloc_size = sizeof(*uid_entry) + max_state *
+ sizeof(uid_entry->time_in_state[0]);
+
+ uid_entry = find_uid_entry_locked(uid);
+ if (uid_entry) {
+ if (uid_entry->max_state == max_state)
+ return uid_entry;
+ /* uid_entry->time_in_state is too small to track all freqs, so
+ * expand it.
+ */
+ temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC);
+ if (!temp)
+ return uid_entry;
+ temp->max_state = max_state;
+ memset(temp->time_in_state + uid_entry->max_state, 0,
+ (max_state - uid_entry->max_state) *
+ sizeof(uid_entry->time_in_state[0]));
+ if (temp != uid_entry) {
+ hlist_replace_rcu(&uid_entry->hash, &temp->hash);
+ kfree_rcu(uid_entry, rcu);
+ }
+ return temp;
+ }
+
+ uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
+ if (!uid_entry)
+ return NULL;
+
+ uid_entry->uid = uid;
+ uid_entry->max_state = max_state;
+
+ hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
+
+ return uid_entry;
+ }
+
+ static bool freq_index_invalid(unsigned int index)
+ {
+ unsigned int cpu;
+ struct cpu_freqs *freqs;
+
+ for_each_possible_cpu(cpu) {
+ freqs = all_freqs[cpu];
+ if (!freqs || index < freqs->offset ||
+ freqs->offset + freqs->max_state <= index)
+ continue;
+ return freqs->freq_table[index - freqs->offset] ==
+ CPUFREQ_ENTRY_INVALID;
+ }
+ return true;
+ }
+
+ static int single_uid_time_in_state_show(struct seq_file *m, void *ptr)
+ {
+ struct uid_entry *uid_entry;
+ unsigned int i;
+ u64 time;
+ uid_t uid = from_kuid_munged(current_user_ns(), *(kuid_t *)m->private);
+
+ if (uid == overflowuid)
+ return -EINVAL;
+
+ rcu_read_lock();
+
+ uid_entry = find_uid_entry_rcu(uid);
+ if (!uid_entry) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ for (i = 0; i < uid_entry->max_state; ++i) {
+ if (freq_index_invalid(i))
+ continue;
+ time = nsec_to_clock_t(uid_entry->time_in_state[i]);
+ seq_write(m, &time, sizeof(time));
+ }
+
+ rcu_read_unlock();
+
+ return 0;
+ }
+
+ static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+ if (*pos >= HASH_SIZE(uid_hash_table))
+ return NULL;
+
+ return &uid_hash_table[*pos];
+ }
+
+ static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
+ (*pos)++;
+
+ if (*pos >= HASH_SIZE(uid_hash_table))
+ return NULL;
+
+ return &uid_hash_table[*pos];
+ }
+
+ static void uid_seq_stop(struct seq_file *seq, void *v) { }
+
+ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
+ {
+ struct uid_entry *uid_entry;
+ struct cpu_freqs *freqs, *last_freqs = NULL;
+ int i, cpu;
+
+ if (v == uid_hash_table) {
+ seq_puts(m, "uid:");
+ for_each_possible_cpu(cpu) {
+ freqs = all_freqs[cpu];
+ if (!freqs || freqs == last_freqs)
+ continue;
+ last_freqs = freqs;
+ for (i = 0; i < freqs->max_state; i++) {
+ if (freqs->freq_table[i] ==
+ CPUFREQ_ENTRY_INVALID)
+ continue;
+ seq_printf(m, " %d", freqs->freq_table[i]);
+ }
+ }
+ seq_putc(m, '\n');
+ }
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+ if (uid_entry->max_state)
+ seq_printf(m, "%d:", uid_entry->uid);
+ for (i = 0; i < uid_entry->max_state; ++i) {
+ if (freq_index_invalid(i))
+ continue;
+ seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t(
+ uid_entry->time_in_state[i]));
+ }
+ if (uid_entry->max_state)
+ seq_putc(m, '\n');
+ }
+
+ rcu_read_unlock();
+ return 0;
+ }
+
+ void cpufreq_task_times_init(struct task_struct *p)
+ {
+ unsigned long flags;
++>>>>>>> 818299f6bdae
spin_lock_irqsave(&task_time_in_state_lock, flags);
p->time_in_state = NULL;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
p->max_state = 0;
++<<<<<<< HEAD
+
+ max_state = READ_ONCE(next_offset);
++=======
+ }
+
+ void cpufreq_task_times_alloc(struct task_struct *p)
+ {
+ void *temp;
+ unsigned long flags;
+ unsigned int max_state = READ_ONCE(next_offset);
++>>>>>>> 818299f6bdae
/* We use one array to avoid multiple allocs per task */
temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
unsigned long flags;
void *temp;
++<<<<<<< HEAD
++=======
+ if (!p->time_in_state)
+ return;
+
++>>>>>>> 818299f6bdae
spin_lock_irqsave(&task_time_in_state_lock, flags);
temp = p->time_in_state;
p->time_in_state = NULL;
{
unsigned long flags;
unsigned int state;
++<<<<<<< HEAD
+ struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
++=======
+ struct uid_entry *uid_entry;
+ struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+ uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
++>>>>>>> 818299f6bdae
if (!freqs || p->flags & PF_EXITING)
return;
p->time_in_state)
p->time_in_state[state] += cputime;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
++<<<<<<< HEAD
++=======
+
+ spin_lock_irqsave(&uid_lock, flags);
+ uid_entry = find_or_register_uid_locked(uid);
+ if (uid_entry && state < uid_entry->max_state)
+ uid_entry->time_in_state[state] += cputime;
+ spin_unlock_irqrestore(&uid_lock, flags);
++>>>>>>> 818299f6bdae
}
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
all_freqs[cpu] = freqs;
}
++<<<<<<< HEAD
++=======
+ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
+ {
+ struct uid_entry *uid_entry;
+ struct hlist_node *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uid_lock, flags);
+
+ for (; uid_start <= uid_end; uid_start++) {
+ hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
+ hash, uid_start) {
+ if (uid_start == uid_entry->uid) {
+ hash_del_rcu(&uid_entry->hash);
+ kfree_rcu(uid_entry, rcu);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&uid_lock, flags);
+ }
+
++>>>>>>> 818299f6bdae
void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
{
int index;
cpufreq_cpu_put(policy);
}
++<<<<<<< HEAD
++=======
+
+ static const struct seq_operations uid_time_in_state_seq_ops = {
+ .start = uid_seq_start,
+ .next = uid_seq_next,
+ .stop = uid_seq_stop,
+ .show = uid_time_in_state_seq_show,
+ };
+
+ static int uid_time_in_state_open(struct inode *inode, struct file *file)
+ {
+ return seq_open(file, &uid_time_in_state_seq_ops);
+ }
+
+ int single_uid_time_in_state_open(struct inode *inode, struct file *file)
+ {
+ return single_open(file, single_uid_time_in_state_show,
+ &(inode->i_uid));
+ }
+
+ static const struct file_operations uid_time_in_state_fops = {
+ .open = uid_time_in_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ };
+
+ static int __init cpufreq_times_init(void)
+ {
+ proc_create_data("uid_time_in_state", 0444, NULL,
+ &uid_time_in_state_fops, NULL);
+
+ return 0;
+ }
+
+ early_initcall(cpufreq_times_init);
++>>>>>>> 818299f6bdae
* Card write Threshold is introduced since 2.80a
* It's used when HS400 mode is enabled.
*/
++<<<<<<< HEAD
+	if (data->flags & MMC_DATA_WRITE &&
+	    !(host->timing != MMC_TIMING_MMC_HS400))
+		return;
++=======
+ if (data->flags & MMC_DATA_WRITE &&
+ host->timing != MMC_TIMING_MMC_HS400)
+ goto disable;
++>>>>>>> 818299f6bdae
if (data->flags & MMC_DATA_WRITE)
enable = SDMMC_CARD_WR_THR_EN;
enable = SDMMC_CARD_RD_THR_EN;
if (host->timing != MMC_TIMING_MMC_HS200 &&
++<<<<<<< HEAD
+	    host->timing != MMC_TIMING_MMC_HS400 &&
+	    host->timing != MMC_TIMING_UHS_SDR104)
++=======
+ host->timing != MMC_TIMING_UHS_SDR104 &&
+ host->timing != MMC_TIMING_MMC_HS400)
++>>>>>>> 818299f6bdae
goto disable;
blksz_depth = blksz / (1 << host->data_shift);
void ion_buffer_destroy(struct ion_buffer *buffer)
{
++<<<<<<< HEAD
+ exynos_ion_free_fixup(buffer);
+ if (WARN_ON(buffer->kmap_cnt > 0))
++=======
+ if (buffer->kmap_cnt > 0) {
+ pr_warn_once("%s: buffer still mapped in the kernel\n",
+ __func__);
++>>>>>>> 818299f6bdae
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ }
buffer->heap->ops->free(buffer);
kfree(buffer);
}
#define EXYNOS_EMUL_TIME 0x57F0
#define EXYNOS_EMUL_TIME_MASK 0xffff
#define EXYNOS_EMUL_TIME_SHIFT 16
-#define EXYNOS_EMUL_DATA_SHIFT 8
-#define EXYNOS_EMUL_DATA_MASK 0xFF
+#define EXYNOS_EMUL_DATA_SHIFT 7
+#define EXYNOS_EMUL_DATA_MASK 0x1FF
#define EXYNOS_EMUL_ENABLE 0x1
++<<<<<<< HEAD
+#define EXYNOS_TMU_REG_THD_TEMP0 0x50
+#define EXYNOS_TMU_REG_THD_TEMP1 0x170
+#define EXYNOS_TMU_REG_THD_TEMP8 0x450
+#define EXYNOS_THD_TEMP_RISE7_6 0x50
+#define EXYNOS_THD_TEMP_FALL7_6 0x60
+#define EXYNOS_THD_TEMP_R_OFFSET 0x100
+#define EXYNOS_THD_TEMP_RISE7_6_SHIFT 16
+#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
+#define EXYNOS_TMU_INTEN_RISE1_SHIFT 1
+#define EXYNOS_TMU_INTEN_RISE2_SHIFT 2
+#define EXYNOS_TMU_INTEN_RISE3_SHIFT 3
+#define EXYNOS_TMU_INTEN_RISE4_SHIFT 4
+#define EXYNOS_TMU_INTEN_RISE5_SHIFT 5
+#define EXYNOS_TMU_INTEN_RISE6_SHIFT 6
+#define EXYNOS_TMU_INTEN_RISE7_SHIFT 7
+
+#define EXYNOS_TMU_CALIB_SEL_SHIFT (23)
+#define EXYNOS_TMU_CALIB_SEL_MASK (0x1)
+#define EXYNOS_TMU_TEMP_SHIFT (9)
+#define EXYNOS_TMU_TEMP_MASK (0x1ff)
+#define EXYNOS_TMU_TRIMINFO_85_P0_SHIFT (9)
+#define EXYNOS_TRIMINFO_ONE_POINT_TRIMMING (0)
+#define EXYNOS_TRIMINFO_TWO_POINT_TRIMMING (1)
+#define EXYNOS_TMU_T_BUF_VREF_SEL_SHIFT (18)
+#define EXYNOS_TMU_T_BUF_VREF_SEL_MASK (0x1F)
+#define EXYNOS_TMU_T_PTAT_CONT_MASK (0x7)
+#define EXYNOS_TMU_T_BUF_SLOPE_SEL_SHIFT (18)
+#define EXYNOS_TMU_T_BUF_SLOPE_SEL_MASK (0xF)
+#define EXYNOS_TMU_T_BUF_CONT_MASK (0xF)
+
+#define EXYNOS_TMU_REG_TRIM0 (0x3C)
+#define EXYNOS_TMU_T_TRIM0_SHIFT (18)
+#define EXYNOS_TMU_T_TRIM0_MASK (0xF)
+#define EXYNOS_TMU_T_VREF_TRIM_SHIFT (14)
+#define EXYNOS_TMU_T_VREF_TRIM_MASK (0xF)
+#define EXYNOS_TMU_T_VBE_I_TRIM_SHIFT (10)
+#define EXYNOS_TMU_T_VBE_I_TRIM_MASK (0xF)
+#define EXYNOS_TMU_BGRI_TRIM_SHIFT (20)
+#define EXYNOS_TMU_BGRI_TRIM_MASK (0xF)
+#define EXYNOS_TMU_VREF_TRIM_SHIFT (12)
+#define EXYNOS_TMU_VREF_TRIM_MASK (0xF)
+#define EXYNOS_TMU_VBEI_TRIM_SHIFT (8)
+#define EXYNOS_TMU_VBEI_TRIM_MASK (0xF)
+
+#define EXYNOS_TMU_REG_INTPEND0 (0x118)
+#define EXYNOS_TMU_REG_INTPEND5 (0x318)
+#define EXYNOS_TMU_REG_INTPEND8 (0x658)
+#define EXYNOS_TMU_REG_INTPEN_OFFSET (0x10)
+#define EXYNOS_TMU_REG_EMUL_CON (0x160)
+
+#define EXYNOS_TMU_REG_AVG_CON (0x38)
+#define EXYNOS_TMU_AVG_CON_SHIFT (18)
+#define EXYNOS_TMU_AVG_CON_MASK (0x3)
+#define EXYNOS_TMU_AVG_MODE_MASK (0x7)
+#define EXYNOS_TMU_AVG_MODE_DEFAULT (0x0)
+#define EXYNOS_TMU_AVG_MODE_2 (0x5)
+#define EXYNOS_TMU_AVG_MODE_4 (0x6)
+
+#define EXYNOS_TMU_DEM_ENABLE (1)
+#define EXYNOS_TMU_DEM_SHIFT (4)
+
+#define EXYNOS_TMU_REG_COUNTER_VALUE0 (0x30)
+#define EXYNOS_TMU_EN_TEMP_SEN_OFF_SHIFT (0)
+#define EXYNOS_TMU_EN_TEMP_SEN_OFF_MASK (0xffff)
+#define EXYNOS_TMU_REG_COUNTER_VALUE1 (0x34)
+#define EXYNOS_TMU_CLK_SENSE_ON_SHIFT (16)
+#define EXYNOS_TMU_CLK_SENSE_ON_MASK (0xffff)
+#define EXYNOS_TMU_TEM1456X_SENSE_VALUE (0x0A28)
+#define EXYNOS_TMU_TEM1051X_SENSE_VALUE (0x028A)
+#define EXYNOS_TMU_TEM1002X_SENSE_VALUE (0x0514)
+
+#define EXYNOS_TMU_NUM_PROBE_SHIFT (16)
+#define EXYNOS_TMU_NUM_PROBE_MASK (0xf)
+#define EXYNOS_TMU_LPI_MODE_SHIFT (10)
+#define EXYNOS_TMU_LPI_MODE_MASK (1)
+
+#define EXYNOS_GPU_THERMAL_ZONE_ID (2)
+
+#define TOTAL_SENSORS 16
+#define DEFAULT_BALANCE_OFFSET 20
+
+#ifdef CONFIG_EXYNOS_ACPM_THERMAL
+#define PMUREG_AUD_STATUS 0x4004
+static struct acpm_tmu_cap cap;
+static unsigned int num_of_devices, suspended_count;
+static bool cp_call_mode;
+#ifdef CONFIG_SND_SOC_SAMSUNG_ABOX
+#if defined(CONFIG_SOC_EXYNOS9810)
+static bool is_aud_on(void)
+{
+ unsigned int val;
+
+ exynos_pmu_read(PMUREG_AUD_STATUS, &val);
+
+ return ((val & 0xf) == 0xf);
+}
+#endif
+#endif
+#else
+static bool suspended;
+static DEFINE_MUTEX(thermal_suspend_lock);
+#endif
+static bool is_cpu_hotplugged_out;
+
+/* list of multiple instance for each thermal sensor */
+static LIST_HEAD(dtm_dev_list);
+
+static u32 t_bgri_trim;
+static u32 t_vref_trim;
+static u32 t_vbei_trim;
++=======
+ /* Exynos5260 specific */
+ #define EXYNOS5260_TMU_REG_INTEN 0xC0
+ #define EXYNOS5260_TMU_REG_INTSTAT 0xC4
+ #define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
+ #define EXYNOS5260_EMUL_CON 0x100
+
+ /* Exynos4412 specific */
+ #define EXYNOS4412_MUX_ADDR_VALUE 6
+ #define EXYNOS4412_MUX_ADDR_SHIFT 20
+
+ /* Exynos5433 specific registers */
+ #define EXYNOS5433_TMU_REG_CONTROL1 0x024
+ #define EXYNOS5433_TMU_SAMPLING_INTERVAL 0x02c
+ #define EXYNOS5433_TMU_COUNTER_VALUE0 0x030
+ #define EXYNOS5433_TMU_COUNTER_VALUE1 0x034
+ #define EXYNOS5433_TMU_REG_CURRENT_TEMP1 0x044
+ #define EXYNOS5433_THD_TEMP_RISE3_0 0x050
+ #define EXYNOS5433_THD_TEMP_RISE7_4 0x054
+ #define EXYNOS5433_THD_TEMP_FALL3_0 0x060
+ #define EXYNOS5433_THD_TEMP_FALL7_4 0x064
+ #define EXYNOS5433_TMU_REG_INTEN 0x0c0
+ #define EXYNOS5433_TMU_REG_INTPEND 0x0c8
+ #define EXYNOS5433_TMU_EMUL_CON 0x110
+ #define EXYNOS5433_TMU_PD_DET_EN 0x130
+
+ #define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT 16
+ #define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT 23
+ #define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK \
+ (0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
+ #define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK BIT(23)
+
+ #define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING 0
+ #define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING 1
+
+ #define EXYNOS5433_PD_DET_EN 1
+
+ /* exynos5440 specific registers */
+ #define EXYNOS5440_TMU_S0_7_TRIM 0x000
+ #define EXYNOS5440_TMU_S0_7_CTRL 0x020
+ #define EXYNOS5440_TMU_S0_7_DEBUG 0x040
+ #define EXYNOS5440_TMU_S0_7_TEMP 0x0f0
+ #define EXYNOS5440_TMU_S0_7_TH0 0x110
+ #define EXYNOS5440_TMU_S0_7_TH1 0x130
+ #define EXYNOS5440_TMU_S0_7_TH2 0x150
+ #define EXYNOS5440_TMU_S0_7_IRQEN 0x210
+ #define EXYNOS5440_TMU_S0_7_IRQ 0x230
+ /* exynos5440 common registers */
+ #define EXYNOS5440_TMU_IRQ_STATUS 0x000
+ #define EXYNOS5440_TMU_PMIN 0x004
+
+ #define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
+ #define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
+ #define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
+ #define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3
+ #define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4
+ #define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
+ #define EXYNOS5440_EFUSE_SWAP_OFFSET 8
+
+ /* Exynos7 specific registers */
+ #define EXYNOS7_THD_TEMP_RISE7_6 0x50
+ #define EXYNOS7_THD_TEMP_FALL7_6 0x60
+ #define EXYNOS7_TMU_REG_INTEN 0x110
+ #define EXYNOS7_TMU_REG_INTPEND 0x118
+ #define EXYNOS7_TMU_REG_EMUL_CON 0x160
+
+ #define EXYNOS7_TMU_TEMP_MASK 0x1ff
+ #define EXYNOS7_PD_DET_EN_SHIFT 23
+ #define EXYNOS7_TMU_INTEN_RISE0_SHIFT 0
+ #define EXYNOS7_TMU_INTEN_RISE1_SHIFT 1
+ #define EXYNOS7_TMU_INTEN_RISE2_SHIFT 2
+ #define EXYNOS7_TMU_INTEN_RISE3_SHIFT 3
+ #define EXYNOS7_TMU_INTEN_RISE4_SHIFT 4
+ #define EXYNOS7_TMU_INTEN_RISE5_SHIFT 5
+ #define EXYNOS7_TMU_INTEN_RISE6_SHIFT 6
+ #define EXYNOS7_TMU_INTEN_RISE7_SHIFT 7
+ #define EXYNOS7_EMUL_DATA_SHIFT 7
+ #define EXYNOS7_EMUL_DATA_MASK 0x1ff
+
+ #define MCELSIUS 1000
+ /**
+ * struct exynos_tmu_data : A structure to hold the private data of the TMU
+ *	driver
+ * @id: identifier of the one instance of the TMU controller.
+ * @pdata: pointer to the tmu platform/configuration data
+ * @base: base address of the single instance of the TMU controller.
+ * @base_second: base address of the common registers of the TMU controller.
+ * @irq: irq number of the TMU controller.
+ * @soc: id of the SOC type.
+ * @irq_work: pointer to the irq work structure.
+ * @lock: lock to implement synchronization.
+ * @clk: pointer to the clock structure.
+ * @clk_sec: pointer to the clock structure for accessing the base_second.
+ * @sclk: pointer to the clock structure for accessing the tmu special clk.
+ * @temp_error1: fused value of the first point trim.
+ * @temp_error2: fused value of the second point trim.
+ * @regulator: pointer to the TMU regulator structure.
+ * @reg_conf: pointer to structure to register with core thermal.
+ * @ntrip: number of supported trip points.
+ * @enabled: current status of TMU device
+ * @tmu_initialize: SoC specific TMU initialization method
+ * @tmu_control: SoC specific TMU control method
+ * @tmu_read: SoC specific TMU temperature read method
+ * @tmu_set_emulation: SoC specific TMU emulation setting method
+ * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
+ */
+ struct exynos_tmu_data {
+ int id;
+ struct exynos_tmu_platform_data *pdata;
+ void __iomem *base;
+ void __iomem *base_second;
+ int irq;
+ enum soc_type soc;
+ struct work_struct irq_work;
+ struct mutex lock;
+ struct clk *clk, *clk_sec, *sclk;
+ u16 temp_error1, temp_error2;
+ struct regulator *regulator;
+ struct thermal_zone_device *tzd;
+ unsigned int ntrip;
+ bool enabled;
+
+ int (*tmu_initialize)(struct platform_device *pdev);
+ void (*tmu_control)(struct platform_device *pdev, bool on);
+ int (*tmu_read)(struct exynos_tmu_data *data);
+ void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
+ void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
+ };
++>>>>>>> 818299f6bdae
static void exynos_report_trigger(struct exynos_tmu_data *p)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
mutex_lock(&data->lock);
- clk_enable(data->clk);
data->tmu_control(pdev, on);
++<<<<<<< HEAD
++=======
+ data->enabled = on;
+ clk_disable(data->clk);
++>>>>>>> 818299f6bdae
mutex_unlock(&data->lock);
}
static int exynos_get_temp(void *p, int *temp)
{
struct exynos_tmu_data *data = p;
++<<<<<<< HEAD
+#ifndef CONFIG_EXYNOS_ACPM_THERMAL
+ struct thermal_cooling_device *cdev = NULL;
+ struct thermal_zone_device *tz;
+ struct thermal_instance *instance;
+#endif
+#ifdef CONFIG_EXYNOS_MCINFO
+ unsigned int mcinfo_count;
+ unsigned int mcinfo_result[4] = {0, 0, 0, 0};
+ unsigned int mcinfo_logging = 0;
+ unsigned int mcinfo_temp = 0;
+ unsigned int i;
+#endif
++=======
+ int value, ret = 0;
++>>>>>>> 818299f6bdae
- if (!data || !data->tmu_read)
+ if (!data || !data->tmu_read || !data->enabled)
return -EINVAL;
mutex_lock(&data->lock);
- clk_enable(data->clk);
++<<<<<<< HEAD
+ if (data->num_of_sensors)
+ *temp = data->tmu_read(data) * MCELSIUS;
+ else
+ *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
++=======
+ value = data->tmu_read(data);
+ if (value < 0)
+ ret = value;
+ else
+ *temp = code_to_temp(data, value) * MCELSIUS;
++>>>>>>> 818299f6bdae
- clk_disable(data->clk);
mutex_unlock(&data->lock);
++<<<<<<< HEAD
+#ifndef CONFIG_EXYNOS_ACPM_THERMAL
+ tz = data->tzd;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->cdev) {
+ cdev = instance->cdev;
+ break;
+ }
+ }
+
+ if (!cdev)
+ return 0;
+
+ mutex_lock(&thermal_suspend_lock);
+
+ if (cdev->ops->set_cur_temp && data->id != 1)
+ cdev->ops->set_cur_temp(cdev, suspended, *temp / 1000);
+
+ mutex_unlock(&thermal_suspend_lock);
+#endif
+
+ dbg_snapshot_thermal(data->pdata, *temp / 1000, data->tmu_name, 0);
+#ifdef CONFIG_EXYNOS_MCINFO
+ if (data->id == 0) {
+ mcinfo_count = get_mcinfo_base_count();
+ get_refresh_rate(mcinfo_result);
+
+ for (i = 0; i < mcinfo_count; i++) {
+ mcinfo_temp |= (mcinfo_result[i] & 0xf) << (8 * i);
+
+ if (mcinfo_result[i] >= MCINFO_LOG_THRESHOLD)
+ mcinfo_logging = 1;
+ }
+
+ if (mcinfo_logging == 1)
+ dbg_snapshot_thermal(NULL, mcinfo_temp, "MCINFO", 0);
+ }
+#endif
+ return 0;
++=======
+ return ret;
++>>>>>>> 818299f6bdae
}
-#ifdef CONFIG_THERMAL_EMULATION
-static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
- int temp)
+static int exynos_get_trend(void *p, int trip, enum thermal_trend *trend)
{
- if (temp) {
- temp /= MCELSIUS;
+ struct exynos_tmu_data *data = p;
+ struct thermal_zone_device *tz = data->tzd;
+ int trip_temp, ret = 0;
- if (data->soc != SOC_ARCH_EXYNOS5440) {
- val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
- val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
- }
- if (data->soc == SOC_ARCH_EXYNOS7) {
- val &= ~(EXYNOS7_EMUL_DATA_MASK <<
- EXYNOS7_EMUL_DATA_SHIFT);
- val |= (temp_to_code(data, temp) <<
- EXYNOS7_EMUL_DATA_SHIFT) |
- EXYNOS_EMUL_ENABLE;
- } else {
- val &= ~(EXYNOS_EMUL_DATA_MASK <<
- EXYNOS_EMUL_DATA_SHIFT);
- val |= (temp_to_code(data, temp) <<
- EXYNOS_EMUL_DATA_SHIFT) |
- EXYNOS_EMUL_ENABLE;
- }
- } else {
- val &= ~EXYNOS_EMUL_ENABLE;
- }
+ ret = tz->ops->get_trip_temp(tz, trip, &trip_temp);
+ if (ret < 0)
+ return ret;
+
+ if (tz->temperature >= trip_temp)
+ *trend = THERMAL_TREND_RAISE_FULL;
+ else
+ *trend = THERMAL_TREND_DROP_FULL;
- return val;
+ return 0;
}
-static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
+#ifdef CONFIG_THERMAL_EMULATION
+static void exynos9810_tmu_set_emulation(struct exynos_tmu_data *data,
int temp)
{
unsigned int val;
spin_unlock_irqrestore(&port->lock, flags);
}
++<<<<<<< HEAD
++=======
+ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+ {
+ struct s3c24xx_uart_dma *dma = p->dma;
+ int ret;
+
+ /* Default slave configuration parameters */
+ dma->rx_conf.direction = DMA_DEV_TO_MEM;
+ dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
+ dma->rx_conf.src_maxburst = 1;
+
+ dma->tx_conf.direction = DMA_MEM_TO_DEV;
+ dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
+ dma->tx_conf.dst_maxburst = 1;
+
+ dma->rx_chan = dma_request_chan(p->port.dev, "rx");
+
+ if (IS_ERR(dma->rx_chan))
+ return PTR_ERR(dma->rx_chan);
+
+ dmaengine_slave_config(dma->rx_chan, &dma->rx_conf);
+
+ dma->tx_chan = dma_request_chan(p->port.dev, "tx");
+ if (IS_ERR(dma->tx_chan)) {
+ ret = PTR_ERR(dma->tx_chan);
+ goto err_release_rx;
+ }
+
+ dmaengine_slave_config(dma->tx_chan, &dma->tx_conf);
+
+ /* RX buffer */
+ dma->rx_size = PAGE_SIZE;
+
+ dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL);
+ if (!dma->rx_buf) {
+ ret = -ENOMEM;
+ goto err_release_tx;
+ }
+
+ dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
+ dma->rx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(p->port.dev, dma->rx_addr)) {
+ ret = -EIO;
+ goto err_free_rx;
+ }
+
+ /* TX buffer */
+ dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(p->port.dev, dma->tx_addr)) {
+ ret = -EIO;
+ goto err_unmap_rx;
+ }
+
+ return 0;
+
+ err_unmap_rx:
+ dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size,
+ DMA_FROM_DEVICE);
+ err_free_rx:
+ kfree(dma->rx_buf);
+ err_release_tx:
+ dma_release_channel(dma->tx_chan);
+ err_release_rx:
+ dma_release_channel(dma->rx_chan);
+ return ret;
+ }
+
+ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
+ {
+ struct s3c24xx_uart_dma *dma = p->dma;
+
+ if (dma->rx_chan) {
+ dmaengine_terminate_all(dma->rx_chan);
+ dma_unmap_single(p->port.dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+ kfree(dma->rx_buf);
+ dma_release_channel(dma->rx_chan);
+ dma->rx_chan = NULL;
+ }
+
+ if (dma->tx_chan) {
+ dmaengine_terminate_all(dma->tx_chan);
+ dma_unmap_single(p->port.dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->tx_chan);
+ dma->tx_chan = NULL;
+ }
+ }
+
++>>>>>>> 818299f6bdae
static void s3c24xx_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
platform_get_device_id(pdev)->driver_data;
}
+void s3c24xx_serial_fifo_wait(void)
+{
+ struct s3c24xx_uart_port *ourport;
+ struct uart_port *port;
+ unsigned int fifo_stat;
+ unsigned long wait_time;
+
+ list_for_each_entry(ourport, &drvdata_list, node) {
+ if (ourport->port.line != CONFIG_S3C_LOWLEVEL_UART_PORT)
+ continue;
+
+ wait_time = jiffies + HZ / 4;
+ do {
+ port = &ourport->port;
+ fifo_stat = rd_regl(port, S3C2410_UFSTAT);
+ cpu_relax();
+ } while (s3c24xx_serial_tx_fifocnt(ourport, fifo_stat)
+ && time_before(jiffies, wait_time));
+ }
+}
+EXPORT_SYMBOL_GPL(s3c24xx_serial_fifo_wait);
+
+#ifdef CONFIG_CPU_IDLE
+static int s3c24xx_serial_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ struct s3c24xx_uart_port *ourport;
+ struct uart_port *port;
+ unsigned long flags;
+ unsigned int umcon;
+
+ switch (cmd) {
+ case LPA_ENTER:
+ s3c24xx_serial_fifo_wait();
+ break;
+
+ case SICD_ENTER:
+ case SICD_AUD_ENTER:
+ list_for_each_entry(ourport, &drvdata_list, node) {
+ if (ourport->port.line == CONFIG_S3C_LOWLEVEL_UART_PORT)
+ continue;
+
+ port = &ourport->port;
+
+ if (port->state->pm_state == UART_PM_STATE_OFF)
+ continue;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* disable auto flow control & set nRTS for High */
+ umcon = rd_regl(port, S3C2410_UMCON);
+ umcon &= ~(S3C2410_UMCOM_AFC | S3C2410_UMCOM_RTS_LOW);
+ wr_regl(port, S3C2410_UMCON, umcon);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (ourport->rts_control)
+ change_uart_gpio(RTS_PINCTRL, ourport);
+ }
+
+ s3c24xx_serial_fifo_wait();
+ break;
+
+ case SICD_EXIT:
+ case SICD_AUD_EXIT:
+ list_for_each_entry(ourport, &drvdata_list, node) {
+ if (ourport->port.line == CONFIG_S3C_LOWLEVEL_UART_PORT)
+ continue;
+
+ port = &ourport->port;
+
+ if (port->state->pm_state == UART_PM_STATE_OFF)
+ continue;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* enable auto flow control */
+ umcon = rd_regl(port, S3C2410_UMCON);
+ umcon |= S3C2410_UMCOM_AFC;
+ wr_regl(port, S3C2410_UMCON, umcon);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (ourport->rts_control)
+ change_uart_gpio(DEFAULT_PINCTRL, ourport);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block s3c24xx_serial_notifier_block = {
+ .notifier_call = s3c24xx_serial_notifier,
+};
+#endif
+
static int s3c24xx_serial_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct s3c24xx_uart_port *ourport;
int index = probe_index;
- int ret;
+ int ret, fifo_size;
+ int port_index = probe_index;
+
+ dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
- if (np) {
- ret = of_alias_get_id(np, "serial");
- if (ret >= 0)
- index = ret;
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "uart");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "UART aliases are not defined(%d).\n",
+ ret);
+ } else {
+ port_index = ret;
+ }
}
+ ourport = &s3c24xx_serial_ports[port_index];
- dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
+ if (ourport->port.line != port_index)
+ ourport = exynos_serial_default_port(port_index);
++<<<<<<< HEAD
+ if (ourport->port.line >= CONFIG_SERIAL_SAMSUNG_UARTS) {
+ dev_err(&pdev->dev,
+			"the port %d exceeded CONFIG_SERIAL_SAMSUNG_UARTS(%d)\n",
+			ourport->port.line, CONFIG_SERIAL_SAMSUNG_UARTS);
+ return -EINVAL;
+ }
++=======
+ if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
+ dev_err(&pdev->dev, "serial%d out of range\n", index);
+ return -EINVAL;
+ }
+ ourport = &s3c24xx_serial_ports[index];
++>>>>>>> 818299f6bdae
ourport->drv_data = s3c24xx_get_driver_data(pdev);
if (!ourport->drv_data) {
udelay(1);
} while (--retries);
+ phy_exit(dwc->usb3_generic_phy);
+ phy_exit(dwc->usb2_generic_phy);
+
return -ETIMEDOUT;
++<<<<<<< HEAD
+err_usb2phy_init:
+ phy_exit(dwc->usb2_generic_phy);
+
+err_usb2phy_power:
+ phy_power_off(dwc->usb2_generic_phy);
+
+err_usb3phy_power:
+ phy_power_off(dwc->usb3_generic_phy);
+
+ return ret;
+}
+
+/**
+ * dwc3_soft_reset - Issue soft reset
+ * @dwc: Pointer to our controller context structure
+ */
+int dwc3_soft_reset(struct dwc3 *dwc)
+{
+ unsigned long timeout;
+ u32 reg;
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!(reg & DWC3_DCTL_CSFTRST))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dwc->dev, "Reset Timed Out\n");
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ } while (true);
++=======
+ done:
+ /*
+ * For DWC_usb31 controller, once DWC3_DCTL_CSFTRST bit is cleared,
+ * we must wait at least 50ms before accessing the PHY domain
+ * (synchronization delay). DWC_usb31 programming guide section 1.3.2.
+ */
+ if (dwc3_is_usb31(dwc))
+ msleep(50);
++>>>>>>> 818299f6bdae
return 0;
}
dwc3_ep_inc_trb(&dep->trb_dequeue);
}
- /**
- * dwc3_gadget_giveback - call struct usb_request's ->complete callback
- * @dep: The endpoint to whom the request belongs to
- * @req: The request we're giving back
- * @status: completion code for the request
- *
- * Must be called with controller's lock held and interrupts disabled. This
- * function will unmap @req and call its ->complete() callback to notify upper
- * layers that it has completed.
- */
- void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
- int status)
+ void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
+ struct dwc3_request *req, int status)
{
struct dwc3 *dwc = dep->dwc;
+ unsigned int unmap_after_complete = false;
req->started = false;
- list_del(&req->list);
+ /* Only delete from the list if the item isn't poisoned. */
+ if (req->list.next != LIST_POISON1)
+ list_del(&req->list);
req->remaining = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
++<<<<<<< HEAD
+ /*
+ * NOTICE we don't want to unmap before calling ->complete() if we're
+ * dealing with a bounced ep0 request. If we unmap it here, we would end
+	 * up overwriting the contents of req->buf and this could confuse the
+ * gadget driver.
+ */
+ if (req->trb) {
+ if (dwc->ep0_bounced && dep->number <= 1) {
+ dwc->ep0_bounced = false;
+ unmap_after_complete = true;
+ } else {
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
+ req->trb = NULL;
+ }
+ }
+
++=======
+ if (req->trb)
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
+
+ req->trb = NULL;
++>>>>>>> 818299f6bdae
trace_dwc3_gadget_giveback(req);
+ if (dep->number > 1)
+ pm_runtime_put(dwc->dev);
+ }
+
+ /**
+ * dwc3_gadget_giveback - call struct usb_request's ->complete callback
+ * @dep: The endpoint to whom the request belongs to
+ * @req: The request we're giving back
+ * @status: completion code for the request
+ *
+ * Must be called with controller's lock held and interrupts disabled. This
+ * function will unmap @req and call its ->complete() callback to notify upper
+ * layers that it has completed.
+ */
+ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ int status)
+ {
+ struct dwc3 *dwc = dep->dwc;
+
+ dwc3_gadget_del_and_unmap_request(dep, req, status);
+
spin_unlock(&dwc->lock);
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
++<<<<<<< HEAD
+
+ if (unmap_after_complete)
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
+ req->trb = NULL;
+
+ if (dep->number > 1)
+ pm_runtime_put(dwc->dev);
++=======
++>>>>>>> 818299f6bdae
}
/**
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
++<<<<<<< HEAD
+ } else {
++=======
+
+ if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+ (hcd->speed < HCD_USB3)) {
+ if (usb_amd_pt_check_port(hcd->self.controller,
+ port_index))
+ t2 &= ~PORT_WAKE_BITS;
+ }
+ } else
++>>>>>>> 818299f6bdae
t2 &= ~PORT_WAKE_BITS;
+ }
t1 = xhci_port_state_to_neutral(t1);
- if (t1 != t2)
+ if (t1 != t2) {
writel(t2, port_array[port_index]);
+ }
}
+
+ if (is_port_connect && usb_hcd_is_primary_hcd(hcd)) {
+ xhci_info(xhci, "port is connected, phy vendor set\n");
+ ret = phy_vendor_set(xhci->main_hcd->phy, 1, 0);
+ if (ret) {
+ xhci_info(xhci, "phy vendor set fail\n");
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+ }
+ }
+
+ xhci_info(xhci, "%s 'HC_STATE_SUSPENDED' portcon: %d primary_hcd: %d\n",
+ __func__, is_port_connect, usb_hcd_is_primary_hcd(hcd));
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
+
spin_unlock_irqrestore(&xhci->lock, flags);
+
return 0;
}
static int __maybe_unused xhci_plat_suspend(struct device *dev)
{
++<<<<<<< HEAD
+ /*
+ *struct usb_hcd *hcd = dev_get_drvdata(dev);
+ *struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ *int ret;
+ */
+
+	pr_info("[%s]\n", __func__);
++=======
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++>>>>>>> 818299f6bdae
/*
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
* reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
* also applies to runtime suspend.
*/
++<<<<<<< HEAD
+
+ /*
+ *ret = xhci_suspend(xhci, device_may_wakeup(dev));
+ *
+ *if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
+ * clk_disable_unprepare(xhci->clk);
+ */
+ return 0;
++=======
+ return xhci_suspend(xhci, device_may_wakeup(dev));
++>>>>>>> 818299f6bdae
}
static int __maybe_unused xhci_plat_resume(struct device *dev)
{
++<<<<<<< HEAD
+ /*
+ *struct usb_hcd *hcd = dev_get_drvdata(dev);
+ *struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ *int ret;
+ */
+
+	pr_info("[%s]\n", __func__);
++=======
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ ret = xhci_priv_resume_quirk(hcd);
+ if (ret)
+ return ret;
++>>>>>>> 818299f6bdae
- return xhci_resume(xhci, 0);
+ /*
+ *if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
+ * clk_prepare_enable(xhci->clk);
+ *
+ *ret = xhci_priv_resume_quirk(hcd);
+ *if (ret)
+ * return ret;
+ *
+ *return xhci_resume(xhci, 0);
+ */
+ return 0;
}
static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
- /* Reserved. It was XHCI_U2_DISABLE_WAKE */
+ #define XHCI_U2_DISABLE_WAKE (1 << 27)
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
+#define XHCI_L2_SUPPORT (1 << 29)
#define XHCI_SUSPEND_DELAY (1 << 30)
unsigned int num_active_eps;
gfp_t gfp_flags);
extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
gfp_t gfp_flags);
+extern void fscrypt_free_bounce_page(void *pool);
+ /* fname.c */
+ extern int fname_encrypt(struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen);
+ extern bool fscrypt_fname_encrypted_size(const struct inode *inode,
+ u32 orig_len, u32 max_len,
+ u32 *encrypted_len_ret);
+
/* keyinfo.c */
extern void __exit fscrypt_essiv_cleanup(void);
#ifdef CONFIG_CPU_FREQ_TIMES
void cpufreq_task_times_init(struct task_struct *p);
++<<<<<<< HEAD
++=======
+ void cpufreq_task_times_alloc(struct task_struct *p);
++>>>>>>> 818299f6bdae
void cpufreq_task_times_exit(struct task_struct *p);
int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *p);
void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
void cpufreq_times_create_policy(struct cpufreq_policy *policy);
void cpufreq_times_record_transition(struct cpufreq_freqs *freq);
++<<<<<<< HEAD
+#else
+static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
+static inline void cpufreq_times_record_transition(
+ struct cpufreq_freqs *freq) {}
++=======
+ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
+ int single_uid_time_in_state_open(struct inode *inode, struct file *file);
+ #else
+ static inline void cpufreq_task_times_init(struct task_struct *p) {}
+ static inline void cpufreq_task_times_alloc(struct task_struct *p) {}
+ static inline void cpufreq_task_times_exit(struct task_struct *p) {}
+ static inline void cpufreq_acct_update_power(struct task_struct *p,
+ u64 cputime) {}
+ static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
+ static inline void cpufreq_times_record_transition(
+ struct cpufreq_freqs *freq) {}
+ static inline void cpufreq_task_times_remove_uids(uid_t uid_start,
+ uid_t uid_end) {}
++>>>>>>> 818299f6bdae
#endif /* CONFIG_CPU_FREQ_TIMES */
#endif /* _LINUX_CPUFREQ_TIMES_H */
#ifndef _LINUX_FSCRYPT_H
#define _LINUX_FSCRYPT_H
- #include <linux/key.h>
#include <linux/fs.h>
- struct fscrypt_info;
++<<<<<<< HEAD
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <crypto/skcipher.h>
+#include <uapi/linux/fs.h>
+#include <crypto/diskcipher.h>
+
+#define FS_CRYPTO_BLOCK_SIZE 16
+
+#ifndef __FS_HAS_ENCRYPTION
+#define __FS_HAS_ENCRYPTION (IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION) || \
+ IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION) || \
+ IS_ENABLED(CONFIG_UBIFS_FS_ENCRYPTION))
+#endif
+
++=======
- struct fscrypt_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
- };
+ #define FS_CRYPTO_BLOCK_SIZE 16
- /**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
- struct fscrypt_symlink_data {
- __le16 len;
- char encrypted_path[1];
- } __packed;
+ struct fscrypt_ctx;
++>>>>>>> 818299f6bdae
+ struct fscrypt_info;
struct fscrypt_str {
unsigned char *name;
return -EOPNOTSUPP;
}
++<<<<<<< HEAD
+static inline int fscrypt_disk_encrypted(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline void fscrypt_set_bio(const struct inode *inode, struct bio *bio)
+{
+ return;
+}
+
+static inline void *fscrypt_get_diskcipher(const struct inode *inode)
+{
+ return NULL;
+}
++=======
+ static inline int __fscrypt_prepare_symlink(struct inode *dir,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+ {
+ return -EOPNOTSUPP;
+ }
+
+ static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+ {
+ return -EOPNOTSUPP;
+ }
+
+ static inline const char *fscrypt_get_symlink(struct inode *inode,
+ const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done)
+ {
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
++>>>>>>> 818299f6bdae
#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
}
}
++<<<<<<< HEAD
++=======
+ #ifdef CONFIG_NO_HZ_COMMON
+ static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+ {
+ unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
+ bool ret = idle_calls == sg_cpu->saved_idle_calls;
+
+ sg_cpu->saved_idle_calls = idle_calls;
+ return ret;
+ }
+ #else
+ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+ #endif /* CONFIG_NO_HZ_COMMON */
+
+ static void sugov_update_single(struct update_util_data *hook, u64 time,
+ unsigned int flags)
+ {
+ struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ struct cpufreq_policy *policy = sg_policy->policy;
+ unsigned long util, max;
+ unsigned int next_f;
+ bool busy;
+
+ sugov_set_iowait_boost(sg_cpu, time, flags);
+ sg_cpu->last_update = time;
+
+ if (!sugov_should_update_freq(sg_policy, time))
+ return;
+
+ busy = sugov_cpu_is_busy(sg_cpu);
+
+ if (flags & SCHED_CPUFREQ_DL) {
+ next_f = policy->cpuinfo.max_freq;
+ } else {
+ sugov_get_util(&util, &max, sg_cpu->cpu);
+ sugov_iowait_boost(sg_cpu, &util, &max);
+ next_f = get_next_freq(sg_policy, util, max);
+ /*
+ * Do not reduce the frequency if the CPU has not been idle
+ * recently, as the reduction is likely to be premature then.
+ */
+ if (busy && next_f < sg_policy->next_freq &&
+ sg_policy->next_freq != UINT_MAX) {
+ next_f = sg_policy->next_freq;
+
+ /* Reset cached freq as next_freq has changed */
+ sg_policy->cached_raw_freq = 0;
+ }
+ }
+
+ sugov_update_commit(sg_policy, time, next_f);
+ }
+
++>>>>>>> 818299f6bdae
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
/* Account for user time used */
acct_account_cputime(p);
++<<<<<<< HEAD
+#ifdef CONFIG_CPU_FREQ_TIMES
/* Account power usage for user time */
cpufreq_acct_update_power(p, cputime);
+#endif
++=======
++ /* Account power usage for user time */
++ cpufreq_acct_update_power(p, cputime);
++>>>>>>> 818299f6bdae
}
/*
/* Account for system time used */
acct_account_cputime(p);
++<<<<<<< HEAD
+#ifdef CONFIG_CPU_FREQ_TIMES
+ /* Account power usage for system time */
+ cpufreq_acct_update_power(p, cputime);
+#endif
++=======
+
+ /* Account power usage for system time */
+ cpufreq_acct_update_power(p, cputime);
++>>>>>>> 818299f6bdae
}
/*
parst->hook, __func__);
BUG();
}
++<<<<<<< HEAD
+#if 0
+ if (unlikely(!(*el_dev)->name)) {
+ pr_err("qtaguid[%d]: %s(): no dev->name?!!\n",
+ parst->hook, __func__);
+ BUG();
+ }
+#endif
++=======
++>>>>>>> 818299f6bdae
if (skb->dev && *el_dev != skb->dev) {
MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs par->%s=%p %s\n",
parst->hook, skb->dev, skb->dev->name,