From b2f28cb8d1d32a15f2616b9ffc49a56f68e3bc28 Mon Sep 17 00:00:00 2001
From: Choonghoon Park
Date: Mon, 15 Jan 2018 15:11:54 +0900
Subject: [PATCH] hiu: Introduce HIU driver.

Change-Id: Ib8c490128ab6dcc36fc7f502ded8c2d5b9eddc41

[9820] hiu: sync up H/W and S/W frequency with EFF

Exynos FF and the HIU driver cooperate to keep the H/W and S/W
frequencies in sync.

Change-Id: I71689390ca7cf8bea6cfe222b419788c47708ed7

[9820] hiu: update hiu data using cpufreq ready callback

Change-Id: I398899d97541410776b7b780ac72fb87de8ff796

[9820] hiu: modify logging type

Change-Id: I1cc7927255d574462639e6eda4fbd5533eaed890

[9820] hiu: add offset to level for communication with ACPM

Change-Id: Ic37b5c812004f2168c625e6c224e07090a526f6a

[9820] hiu: add field in hiu data for sw power budget limit

When the current frequency is lower than the boost threshold, HIU does
not need to request dynamic power budgeting. SW requests DVFS with a
fixed power budget limit.

Change-Id: I2b20d14d39c729552854bd4226e3d862f65c44a4

[9820] hiu: move some functions under helper function category

Change-Id: I5511e5f9a6778cd92dc7fcdfe5ed905e33a45e5c

[9820] hiu: wait for SR1 response when normal DVFS is requested

Change-Id: Ib6676d2b586ec11c208f02cb9bbac77dc1acb281

[9820] hiu: make sr1_check_loop more stable

Change-Id: I9fefe89eb4a631238789e34629882447d6bc4adc

[9820] hiu: give normal DVFS request higher prio than turbo boost

If 1) a normal DVFS request and turbo boost both request the
tb_threshold DVFS value, and 2) the turbo boost path sees the SR1 write
and clears the SR1 write bit, then the normal DVFS request is stuck in
its wait loop until another SR1 write arrives. As a result, other
normal DVFS requests are blocked on the mutex held by the stuck
request. This patch solves that problem.

Change-Id: I6671fa1149cdc657560df1bdf3d8ac6929d95ac9

[9820] hiu: enable/disable power constraint and turbo boost using dt

Change-Id: I89724d836be9b9a5475d85981a047a423372c9b6

[9820] hiu: modify API & polling thread for processing DVFS triggered by HIU HW

Change-Id: I11afdf7a32a492e19faf347a43eaa81f9b22949d

[9820] hiu: synchronize SW request and HW request using mutex

Change-Id: I7407a29da0a23422a54a70fa33f7619198faf996

[9820] hiu: do not adjust max frequency

If cpufreq_update_policy is not called, the policy's max frequency
could be mis-set.
Therefore, remove max frequency update code in cpufreq policy callback of HIU Change-Id: I5cf7073532bc885b360cb0b47764e824fa958c52 [9820] hiu: do not write SR0 with boost threshold when turbo boosting Change-Id: I575f61f6994f56d2c2498c0371a27f9e32dd5ea7 [9820] hiu: wait for updating sw structure by hwidvfs Change-Id: Ie8e1387aeb2ca355679f340b7480eed0f7b1361a [9820] hiu: do not update DVFSLIMIT unnecessarily Change-Id: Ic3dabb9ee8ada067827dc597bbd3efc53a6cbac3 [9820] hiu: stop polling when turbo boost is released Change-Id: I58528ee2a93ad7ec8de091daa9e0554cc8bca05f [9820] hiu: increase polling term Change-Id: Icd086d3231f4b1b3f2ae399511d7eb5659b6460c [9820] hiu: check normal dvfs done in API Change-Id: Ifec361cd4a146cb633f8b82e469d24b6cda895e6 [9820] hiu: locking when updating limit dvfs Change-Id: I56aa95b5d14c4d89daff1708c8b79131ac0b403e [9820] hiu: set boost level increments using dt Change-Id: I0247aa315a20f634f5479d4ead34645321e96ea4 [9820] hiu: deal with hwi dvfs using hwi_dvfs_flag Change-Id: Ib50268be24994751cab07c9d460d349509426fc5 [9820] hiu: update boost_max using cpufreq policy Change-Id: I0b34be80c4a5d652e241f29d6318c23bf9ee032f [9820] hiu: modify condition to set limit dvfs Condition: Only when clipped freq is higher than or equal to boost threshold Change-Id: I588e4b0d5828f3f59ccf182674fc3aa65c920ef4 [9820] hiu: define polling period using macro Change-Id: Idd74c36d9875f415e2bcc3f08f25a752f78d483c [9820] hiu: bind polling thread to little Change-Id: I54862683a1a9d765fb80d2008e0da7529c9fb23b [9820] hiu: restore hiu data after CHT cluster exits CPD Change-Id: I342b6e3243755f6da49374a4da76f02affe46bcc [9820] hiu: use usleep_range instead of udelay Change-Id: Id19a14242b2d1b25d13b540104b2da0a5543bc16 [9820] hiu: set limit dvfs with hiu data clipped_freq Change-Id: I6e289246800d28f0f990b4a95ce1c49d084a0395 [9820] hiu: determine SR0 write in set_freq API Change-Id: Ic5c46476b9ea6ef0b0a418a42fb2085bc7b796c1 [9820] hiu: refactoring functionality to write on SR0 Change-Id: I42ebceba7946f69fdf4242df8dbbecf5be6732bf [9820] hiu: find policy using online cpu Change-Id: I4f3ad4d13f348765399290b61fefa2e5a1ca150d [9820] hiu: create polling thread when probing Change-Id: I7a4f22b6f573e50e2eedb52aa9e54490109ecf70 [9820] hiu: lock just before cur_freq could be updated Change-Id: I8e0d7eb2e67af8b6633c429041c6ae08b1b69e5f [9820] hiu: force work to run only on big Change-Id: I9a5230f407722a2293eb78fa8a1362797c0407c2 --- drivers/soc/samsung/Kconfig | 7 + drivers/soc/samsung/Makefile | 3 + drivers/soc/samsung/exynos-hiu.c | 798 +++++++++++++++++++++++++++++++ drivers/soc/samsung/exynos-hiu.h | 168 +++++++ 4 files changed, 976 insertions(+) create mode 100644 drivers/soc/samsung/exynos-hiu.c create mode 100644 drivers/soc/samsung/exynos-hiu.h diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index 950b4981b8f0..d36e589b670e 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -126,6 +126,13 @@ config EXYNOS_OCP help Enable OCP handler for Exynos SoC. +config EXYNOS_HIU + bool "Exynos HIU Handler" + depends on CPU_FREQ + default n + help + Enable HIU handler for Exynos SoC. 
+
 config EXYNOS_DVFS_MANAGER
 	bool "Exynos DVFS Manager"
 	default n
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 07533ccc27bd..ab3835e51cd8 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -63,3 +63,6 @@ obj-$(CONFIG_USI_V2)	+= usi_v2.o
 
 # secmem
 obj-$(CONFIG_ARCH_EXYNOS)	+= secmem.o
+
+# HIU
+obj-$(CONFIG_EXYNOS_HIU)	+= exynos-hiu.o
diff --git a/drivers/soc/samsung/exynos-hiu.c b/drivers/soc/samsung/exynos-hiu.c
new file mode 100644
index 000000000000..dcc5fa1ee1b6
--- /dev/null
+++ b/drivers/soc/samsung/exynos-hiu.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS - HIU(Hardware Intervention Unit) support
+ * Author : PARK CHOONGHOON (choong.park@samsung.com)
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "exynos-hiu.h"
+#include "../../cpufreq/exynos-ff.h"
+#include "../../cpufreq/exynos-acme.h"
+
+static struct exynos_hiu_data *data;
+
+static void hiu_stats_create_table(struct cpufreq_policy *policy);
+
+#define POLL_PERIOD 100
+
+/****************************************************************/
+/*			HIU HELPER FUNCTION			*/
+/****************************************************************/
+static int hiu_get_freq_level(unsigned int freq)
+{
+	int level;
+	struct hiu_stats *stats = data->stats;
+
+	if (unlikely(!stats))
+		return 0;
+
+	for (level = 0; level < stats->last_level; level++)
+		if (stats->freq_table[level] == freq)
+			return level + data->level_offset;
+
+	return -EINVAL;
+}
+
+static int hiu_get_power_budget(unsigned int freq)
+{
+	return data->sw_pbl;
+}
+
+static void hiu_update_reg(int offset, int mask, int shift, unsigned int val)
+{
+	unsigned int reg_val;
+
+	reg_val = __raw_readl(data->base + offset);
+	reg_val &= ~(mask << shift);
+	reg_val |= val << shift;
+	__raw_writel(reg_val, data->base + offset);
+}
+
+static unsigned int hiu_read_reg(int offset, int mask, int shift)
+{
+	unsigned int reg_val;
+
+	reg_val = __raw_readl(data->base + offset);
+	return (reg_val >> shift) & mask;
+}
+
+static unsigned int hiu_get_act_dvfs(void)
+{
+	return hiu_read_reg(HIUTOPCTL1, ACTDVFS_MASK, ACTDVFS_SHIFT);
+}
+
+static void hiu_control_err_interrupts(int enable)
+{
+	if (enable)
+		hiu_update_reg(HIUTOPCTL1, ENB_ERR_INTERRUPTS_MASK, 0, ENB_ERR_INTERRUPTS_MASK);
+	else
+		hiu_update_reg(HIUTOPCTL1, ENB_ERR_INTERRUPTS_MASK, 0, 0);
+}
+
+static void hiu_control_mailbox(int enable)
+{
+	hiu_update_reg(HIUTOPCTL1, ENB_SR1INTR_MASK, ENB_SR1INTR_SHIFT, !!enable);
+	hiu_update_reg(HIUTOPCTL1, ENB_ACPM_COMM_MASK, ENB_ACPM_COMM_SHIFT, !!enable);
+}
+
+static void hiu_set_limit_dvfs(unsigned int freq)
+{
+	int level;
+
+	level = hiu_get_freq_level(freq);
+	if (level < 0)
+		return;
+
+	hiu_update_reg(HIUTOPCTL2, LIMITDVFS_MASK, LIMITDVFS_SHIFT, level);
+}
+
+static void hiu_set_tb_dvfs(unsigned int freq)
+{
+	int level;
+
+	level = hiu_get_freq_level(freq);
+	if (level < 0)
+		return;
+
+	hiu_update_reg(HIUTBCTL, TBDVFS_MASK, TBDVFS_SHIFT, level);
+}
+
+static void hiu_control_tb(int enable)
+{
+	hiu_update_reg(HIUTBCTL, TB_ENB_MASK, TB_ENB_SHIFT, !!enable);
+}
+
+static void hiu_control_pc(int enable)
+{
+	hiu_update_reg(HIUTBCTL, PC_DISABLE_MASK, PC_DISABLE_SHIFT, !enable);
+}
+
+static void hiu_set_boost_level_inc(void)
+{
+	unsigned int inc;
+	struct device_node *dn = data->dn;
+
+	if (!of_property_read_u32(dn, "bl1-inc", &inc))
+		hiu_update_reg(HIUTBCTL, B1_INC_MASK, B1_INC_SHIFT, inc);
+	if (!of_property_read_u32(dn, "bl2-inc", &inc))
+		hiu_update_reg(HIUTBCTL, B2_INC_MASK, B2_INC_SHIFT, inc);
+	if (!of_property_read_u32(dn, "bl3-inc", &inc))
+		hiu_update_reg(HIUTBCTL, B3_INC_MASK, B3_INC_SHIFT, inc);
+}
+
+static void hiu_set_tb_ps_cfg_each(int index, unsigned int cfg_val)
+{
+	int offset;
+
+	offset = HIUTBPSCFG_BASE + index * HIUTBPSCFG_OFFSET;
+	hiu_update_reg(offset, HIUTBPSCFG_MASK, 0, cfg_val);
+}
+
+static int hiu_set_tb_ps_cfg(void)
+{
+	int size, index;
+	unsigned int val;
+	struct hiu_cfg *table;
+	struct device_node *dn = data->dn;
+
+	size = of_property_count_u32_elems(dn, "config-table");
+	if (size < 0)
+		return size;
+
+	table = kzalloc(sizeof(struct hiu_cfg) * size / 4, GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	of_property_read_u32_array(dn, "config-table", (unsigned int *)table, size);
+
+	for (index = 0; index < size / 4; index++) {
+		val = 0;
+		val |= table[index].power_borrowed << PB_SHIFT;
+		val |= table[index].boost_level << BL_SHIFT;
+		val |= table[index].power_budget_limit << PBL_SHIFT;
+		val |= table[index].power_threshold_inc << TBPWRTHRESH_INC_SHIFT;
+
+		hiu_set_tb_ps_cfg_each(index, val);
+	}
+
+	kfree(table);
+
+	return 0;
+}
+
+static bool check_hiu_sr1_irq_pending(void)
+{
+	return !!hiu_read_reg(HIUTOPCTL1, HIU_MBOX_RESPONSE_MASK, SR1INTR_SHIFT);
+}
+
+static void clear_hiu_sr1_irq_pending(void)
+{
+	hiu_update_reg(HIUTOPCTL1, HIU_MBOX_RESPONSE_MASK, SR1INTR_SHIFT, 0);
+}
+
+static bool check_hiu_mailbox_err_pending(void)
+{
+	return !!hiu_read_reg(HIUTOPCTL1, HIU_MBOX_ERR_MASK, HIU_MBOX_ERR_SHIFT);
+}
+
+static unsigned int get_hiu_mailbox_err(void)
+{
+	return hiu_read_reg(HIUTOPCTL1, HIU_MBOX_ERR_MASK, HIU_MBOX_ERR_SHIFT);
+}
+
+static void hiu_mailbox_err_handler(void)
+{
+	unsigned int err, val;
+
+	err = get_hiu_mailbox_err();
+
+	if (err & SR1UXPERR_MASK)
+		pr_err("exynos-hiu: unexpected error occurs\n");
+
+	if (err & SR1SNERR_MASK) {
+		val = __raw_readl(data->base + HIUTOPCTL2);
+		val = (val >> SEQNUM_SHIFT) & SEQNUM_MASK;
+		pr_err("exynos-hiu: erroneous sequence num %d\n", val);
+	}
+
+	if (err & SR1TIMEOUT_MASK)
+		pr_err("exynos-hiu: TIMEOUT on SR1 write\n");
+
+	if (err & SR0RDERR_MASK)
+		pr_err("exynos-hiu: SR0 read twice or more\n");
+}
+
+static bool check_hiu_req_freq_updated(unsigned int req_freq)
+{
+	unsigned int cur_level, cur_freq;
+
+	cur_level = hiu_get_act_dvfs();
+	cur_freq = data->stats->freq_table[cur_level - data->level_offset];
+
+	/*
+	 * If req_freq == boost_threshold, HIU could request turbo boost.
+	 * That's why, in case of req_freq == boost_threshold,
+	 * the requested frequency update is considered as done
+	 * if act_dvfs is larger than or equal to the boost threshold.
+	 */
+	if (req_freq == data->boost_threshold)
+		return cur_freq >= data->boost_threshold;
+
+	return cur_freq == req_freq;
+}
+
+static bool check_hiu_normal_req_done(unsigned int req_freq)
+{
+	return check_hiu_sr1_irq_pending() &&
+		check_hiu_req_freq_updated(req_freq);
+}
+
+static bool check_hiu_need_register_restore(void)
+{
+	return !hiu_read_reg(HIUTOPCTL1, ENB_SR1INTR_MASK, ENB_SR1INTR_SHIFT);
+}
+
+static int request_dvfs_on_sr0(unsigned int req_freq)
+{
+	unsigned int val;
+	int level, budget;
+
+	/* Get dvfs level */
+	level = hiu_get_freq_level(req_freq);
+	if (level < 0)
+		return -EINVAL;
+
+	/* Get power budget */
+	budget = hiu_get_power_budget(req_freq);
+	if (budget <
0) + return -EINVAL; + + /* write REQDVFS & REQPBL to HIU SFR */ + val = __raw_readl(data->base + HIUTOPCTL2); + val &= ~(REQDVFS_MASK << REQDVFS_SHIFT | REQPBL_MASK << REQPBL_SHIFT); + val |= (level << REQDVFS_SHIFT | budget << REQPBL_SHIFT); + __raw_writel(val, data->base + HIUTOPCTL2); + + return 0; +} + +/****************************************************************/ +/* HIU API */ +/****************************************************************/ +static int hiu_sr1_check_loop(void *unused); +static void __exynos_hiu_update_data(struct cpufreq_policy *policy); + +int exynos_hiu_set_freq(unsigned int id, unsigned int req_freq) +{ + bool need_update_cur_freq = true; + + if (unlikely(!data)) + return -ENODEV; + + if (!data->enabled) + return -ENODEV; + + pr_debug("exynos-hiu: update data->cur_freq:%d\n", data->cur_freq); + + mutex_lock(&data->lock); + + if (check_hiu_need_register_restore()) + __exynos_hiu_update_data(NULL); + + /* PM QoS could make req_freq bigger than boost_threshold */ + if (req_freq >= data->boost_threshold){ + /* + * 1) If turbo boost is already activated + * just update cur_freq and return. + * 2) If not, req_freq should be boost_threshold; + * DO NOT allow req_freq to be bigger than boost_threshold. + */ + if (data->cur_freq >= data->boost_threshold) { + data->cur_freq = req_freq; + mutex_unlock(&data->lock); + return 0; + } + else { + data->cur_freq = req_freq; + req_freq = data->boost_threshold; + need_update_cur_freq = false; + } + } + + /* Write req_freq on SR0 to request DVFS */ + request_dvfs_on_sr0(req_freq); + + if (data->operation_mode == POLLING_MODE) { + while (!check_hiu_normal_req_done(req_freq) && + !check_hiu_mailbox_err_pending()) + usleep_range(POLL_PERIOD, 2 * POLL_PERIOD); + + if (check_hiu_mailbox_err_pending()) { + hiu_mailbox_err_handler(); + BUG_ON(1); + } + + if (need_update_cur_freq) + data->cur_freq = req_freq; + clear_hiu_sr1_irq_pending(); + + if (req_freq == data->boost_threshold && !data->boosting_activated) { + data->boosting_activated = true; + wake_up(&data->polling_wait); + } + } + + mutex_unlock(&data->lock); + + pr_debug("exynos-hiu: set REQDVFS to HIU : %ukHz\n", req_freq); + + return 0; +} + +int exynos_hiu_get_freq(unsigned int id) +{ + if (unlikely(!data)) + return -ENODEV; + + return data->cur_freq; +} + +int exynos_hiu_get_max_freq(void) +{ + if (unlikely(!data)) + return -1; + + return data->clipped_freq; +} + +/****************************************************************/ +/* HIU SR1 WRITE HANDLER */ +/****************************************************************/ +static void exynos_hiu_work(struct work_struct *work) +{ + int cpu; + unsigned int boost_freq, level; + struct cpufreq_policy *policy; + struct hiu_stats *stats = data->stats; + struct cpumask mask; + + cpumask_and(&mask, &data->cpus, cpu_online_mask); + if (cpumask_empty(&mask)) { + pr_debug("exynos-hiu: all cores in big cluster off\n"); + goto done; + } + + /* Test current cpu is in data->cpus */ + cpu = smp_processor_id(); + if (!cpumask_test_cpu(cpu, &mask)) { + pr_debug("exynos-hiu: work task is moved to cpu%d\n", cpu); + goto done; + } + + level = hiu_get_act_dvfs(); + boost_freq = stats->freq_table[level - data->level_offset]; + + /* + * Only when TB bit is set, this work callback is called. + * However, while this callback is waiting to start, + * well... turbo boost could be released. + * So, acting frequency coulde be lower than turbo boost threshold. + * This condition code is for treating that case. 
+ */ + if (boost_freq < data->boost_threshold) + goto done; + + policy = cpufreq_cpu_get(cpumask_first(&mask)); + if (!policy) { + pr_debug("Failed to get CPUFreq policy in HIU work\n"); + goto done; + } + + __cpufreq_driver_target(policy, boost_freq, + CPUFREQ_RELATION_H | CPUFREQ_HW_DVFS_REQ); + + cpufreq_cpu_put(policy); +done: + data->hwidvfs_done = true; + wake_up(&data->hwidvfs_wait); +} + +static irqreturn_t exynos_hiu_irq_handler(int irq, void *id) +{ + schedule_work_on(data->cpu, &data->work); + + return IRQ_HANDLED; +} + +static bool hiu_need_hw_request(void) +{ + unsigned int cur_level, cur_freq; + + cur_level = hiu_get_act_dvfs(); + cur_freq = data->stats->freq_table[cur_level - data->level_offset]; + + return cur_freq >= data->boost_threshold; +} + +static int hiu_sr1_check_loop(void *unused) +{ +wait: + wait_event(data->polling_wait, data->boosting_activated); +poll: + mutex_lock(&data->lock); + + if (data->cur_freq < data->boost_threshold) + goto done; + + while (!check_hiu_sr1_irq_pending() && + !check_hiu_mailbox_err_pending()) { + mutex_unlock(&data->lock); + usleep_range(POLL_PERIOD, 2 * POLL_PERIOD); + mutex_lock(&data->lock); + + if (data->cur_freq < data->boost_threshold) + goto done; + } + + if (check_hiu_mailbox_err_pending()) { + hiu_mailbox_err_handler(); + BUG_ON(1); + } + + if (hiu_need_hw_request()) { + schedule_work_on(data->cpu, &data->work); + clear_hiu_sr1_irq_pending(); + mutex_unlock(&data->lock); + + wait_event(data->hwidvfs_wait, data->hwidvfs_done); + data->hwidvfs_done = false; + + goto poll; + } + +done: + data->boosting_activated = false; + mutex_unlock(&data->lock); + goto wait; + + /* NEVER come here */ + + return 0; +} + +/****************************************************************/ +/* EXTERNAL EVENT HANDLER */ +/****************************************************************/ +static void __exynos_hiu_update_data(struct cpufreq_policy *policy) +{ + /* Explicitly disable the whole HW */ + /* ex) hiu_control_pc, tb(0), hiu_control_mailbox(0) */ + + /* Set dvfs limit and TB threshold */ + hiu_set_limit_dvfs(data->clipped_freq); + hiu_set_tb_dvfs(data->boost_threshold); + + /* Initialize TB level offset */ + hiu_set_boost_level_inc(); + + /* Initialize TB power state config */ + hiu_set_tb_ps_cfg(); + + /* Enable TB */ + hiu_control_pc(data->pc_enabled); + hiu_control_tb(data->tb_enabled); + + /* Enable error interrupts */ + hiu_control_err_interrupts(1); + /* Enable mailbox communication with ACPM */ + hiu_control_mailbox(1); +} + +static int exynos_hiu_update_data(struct cpufreq_policy *policy) +{ + if (!cpumask_test_cpu(data->cpu, policy->cpus)) + return 0; + + data->boost_max = policy->user_policy.max; + data->clipped_freq = data->boost_max; + hiu_stats_create_table(policy); + + __exynos_hiu_update_data(policy); + + data->enabled = true; + + pr_info("exynos-hiu: HIU data structure update complete\n"); + + return 0; +} + +static struct exynos_cpufreq_ready_block exynos_hiu_ready = { + .update = exynos_hiu_update_data, +}; + +static bool check_hiu_need_boost_thrott(void) +{ + return data->cur_freq > data->boost_threshold && + data->cur_freq > data->clipped_freq; +} + +static int exynos_hiu_policy_callback(struct notifier_block *nb, + unsigned long event, void *info) +{ + struct cpufreq_policy *policy = info; + + if (policy->cpu != data->cpu) + return NOTIFY_DONE; + + if (policy->max == data->clipped_freq) + return NOTIFY_DONE; + + switch (event) { + case CPUFREQ_NOTIFY: + + /* Note : MUST write LIMIT_DVFS to HIU SFR */ + 
mutex_lock(&data->lock); + if (policy->max >= data->boost_threshold) { + data->clipped_freq = policy->max; + hiu_set_limit_dvfs(data->clipped_freq); + } + mutex_unlock(&data->lock); + + pr_debug("exynos-hiu: update clipped freq:%d\n", data->clipped_freq); + if (check_hiu_need_boost_thrott()) + atomic_inc(&boost_throttling); + break; + default: + ; + } + + return NOTIFY_OK; +} + +static struct notifier_block exynos_hiu_policy_notifier = { + .notifier_call = exynos_hiu_policy_callback, +}; + +static int exynos_hiu_transition_callback(struct notifier_block *nb, + unsigned long event, void *info) +{ + struct cpufreq_freqs *freq = info; + int cpu = freq->cpu; + + if (cpu != data->cpu) + return NOTIFY_DONE; + + if (event != CPUFREQ_POSTCHANGE) + return NOTIFY_DONE; + + if (atomic_read(&boost_throttling) && + data->cur_freq <= data->clipped_freq) { + atomic_dec(&boost_throttling); + } + + return NOTIFY_OK; +} + +static struct notifier_block exynos_hiu_transition_notifier = { + .notifier_call = exynos_hiu_transition_callback, + .priority = INT_MIN, +}; + +/****************************************************************/ +/* SYSFS INTERFACE */ +/****************************************************************/ +static ssize_t +hiu_enable_show(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", data->enabled); +} + +static ssize_t +hiu_enable_store(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + unsigned int input; + + if (kstrtos32(buf, 10, &input)) + return -EINVAL; + + return count; +} + +static DEVICE_ATTR(enabled, 0644, hiu_enable_show, hiu_enable_store); + +static struct attribute *exynos_hiu_attrs[] = { + &dev_attr_enabled.attr, + NULL, +}; + +static struct attribute_group exynos_hiu_attr_group = { + .name = "hiu", + .attrs = exynos_hiu_attrs, +}; + +/****************************************************************/ +/* INITIALIZE EXYNOS HIU DRIVER */ +/****************************************************************/ +static int hiu_dt_parsing(struct device_node *dn) +{ + const char *buf; + int ret = 0; + + ret |= of_property_read_u32(dn, "operation-mode", &data->operation_mode); + ret |= of_property_read_u32(dn, "boot-freq", &data->cur_freq); + ret |= of_property_read_u32(dn, "boost-threshold", &data->boost_threshold); + ret |= of_property_read_u32(dn, "sw-pbl", &data->sw_pbl); + ret |= of_property_read_string(dn, "sibling-cpus", &buf); + if (ret) + return ret; + + if (of_property_read_bool(dn, "pc-enabled")) + data->pc_enabled = true; + + if (of_property_read_bool(dn, "tb-enabled")) + data->tb_enabled = true; + + cpulist_parse(buf, &data->cpus); + cpumask_and(&data->cpus, &data->cpus, cpu_possible_mask); + if (cpumask_weight(&data->cpus) == 0) + return -ENODEV; + + data->cpu = cpumask_first(&data->cpus); + + return 0; +} + +static void hiu_stats_create_table(struct cpufreq_policy *policy) +{ + unsigned int i = 0, count = 0, alloc_size; + struct hiu_stats *stats; + struct cpufreq_frequency_table *pos, *table; + + table = policy->freq_table; + if (unlikely(!table)) + return; + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return; + + cpufreq_for_each_valid_entry(pos, table) + count++; + + alloc_size = count * (sizeof(unsigned int) + sizeof(u64)); + + stats->freq_table = kzalloc(alloc_size, GFP_KERNEL); + if (!stats->freq_table) + goto free_stat; + + stats->time_in_state = (unsigned long long *)(stats->freq_table + count); + + stats->last_level = count; + + 
cpufreq_for_each_valid_entry(pos, table) + stats->freq_table[i++] = pos->frequency; + + data->stats = stats; + + cpufreq_for_each_valid_entry(pos, table) { + data->level_offset = pos->driver_data; + break; + } + + return; +free_stat: + kfree(stats); +} + +static int exynos_hiu_probe(struct platform_device *pdev) +{ + struct task_struct *polling_thread; + struct device_node *dn = pdev->dev.of_node; + int ret; + + data = kzalloc(sizeof(struct exynos_hiu_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_init(&data->lock); + + platform_set_drvdata(pdev, data); + + data->base = ioremap(GCU_BASE, SZ_4K); + + ret = hiu_dt_parsing(dn); + if (ret) { + dev_err(&pdev->dev, "Failed to parse HIU data\n"); + return -ENODEV; + } + + data->dn = dn; + + if (data->operation_mode == INTERRUPT_MODE) { + data->irq = irq_of_parse_and_map(dn, 0); + if (data->irq <= 0) { + dev_err(&pdev->dev, "Failed to get IRQ\n"); + return -ENODEV; + } + + ret = devm_request_irq(&pdev->dev, data->irq, exynos_hiu_irq_handler, + IRQF_TRIGGER_RISING, dev_name(&pdev->dev), data); + if (ret) { + dev_err(&pdev->dev, "Failed to request IRQ handler: %d\n", data->irq); + return -ENODEV; + } + } else { + init_waitqueue_head(&data->polling_wait); + data->boosting_activated = false; + + polling_thread = kthread_create(hiu_sr1_check_loop, NULL, "hiu_polling"); + kthread_bind_mask(polling_thread, cpu_coregroup_mask(0)); + wake_up_process(polling_thread); + } + + cpufreq_register_notifier(&exynos_hiu_policy_notifier, CPUFREQ_POLICY_NOTIFIER); + cpufreq_register_notifier(&exynos_hiu_transition_notifier, CPUFREQ_TRANSITION_NOTIFIER); + + INIT_WORK(&data->work, exynos_hiu_work); + init_waitqueue_head(&data->hwidvfs_wait); + + ret = sysfs_create_group(&pdev->dev.kobj, &exynos_hiu_attr_group); + if (ret) + dev_err(&pdev->dev, "Failed to create Exynos HIU attr group"); + + exynos_cpufreq_ready_list_add(&exynos_hiu_ready); + + dev_info(&pdev->dev, "HIU Handler initialization complete\n"); + return 0; +} + +static int exynos_hiu_suspend(struct platform_device *pdev, pm_message_t state) +{ + /* HACK : disable turbo boost */ + return 0; +} + +static int exynos_hiu_resume(struct platform_device *pdev) +{ + /* HACK : enable turbo boost */ + return 0; +} + +static const struct of_device_id of_exynos_hiu_match[] = { + { .compatible = "samsung,exynos-hiu", }, + { }, +}; + +static const struct platform_device_id exynos_hiu_ids[] = { + { "exynos-hiu", }, + { } +}; + +static struct platform_driver exynos_hiu_driver = { + .driver = { + .name = "exynos-hiu", + .owner = THIS_MODULE, + .of_match_table = of_exynos_hiu_match, + }, + .probe = exynos_hiu_probe, + .suspend = exynos_hiu_suspend, + .resume = exynos_hiu_resume, + .id_table = exynos_hiu_ids, +}; + +int __init exynos_hiu_init(void) +{ + return platform_driver_register(&exynos_hiu_driver); +} +arch_initcall(exynos_hiu_init); diff --git a/drivers/soc/samsung/exynos-hiu.h b/drivers/soc/samsung/exynos-hiu.h new file mode 100644 index 000000000000..e382456a44c7 --- /dev/null +++ b/drivers/soc/samsung/exynos-hiu.h @@ -0,0 +1,168 @@ +#ifndef __EXYNOS_HIU_H__ +#define __EXYNOS_HIU_H__ + +#include +#include + +/* Function Id to Enable HIU in EL3 */ +#define GCU_BASE (0x1E4C0000) + +/* SR1 write monitoring operation mode */ +#define POLLING_MODE (0) +#define INTERRUPT_MODE (1) + +/* GCU Control Register */ +#define GCUCTL (0x22C) +#define HIUINTR_EN_MASK (0x1) +#define HIUINTR_EN_SHIFT (3) +#define HIUERR_EN_MASK (0x1) +#define HIUERR_EN_SHIFT (2) + +/* GCU ERROR Register */ +#define GCUERR (0x26C) 
+#define HIUINTR_MASK (0x1) +#define HIUINTR_SHIFT (3) +#define HIUERR_MASK (0x1) +#define HIUERR_SHIFT (2) + +/* HIU Top Level Control 1 Register */ +#define HIUTOPCTL1 (0xE00) +#define ACTDVFS_MASK (0x3F) +#define ACTDVFS_SHIFT (24) +#define HIU_MBOX_RESPONSE_MASK (0x1) +#define SR1INTR_SHIFT (15) +#define SR1UXPERR_MASK (1 << 3) +#define SR1SNERR_MASK (1 << 2) +#define SR1TIMEOUT_MASK (1 << 1) +#define SR0RDERR_MASK (1 << 0) +#define HIU_MBOX_ERR_MASK (0xF) +#define HIU_MBOX_ERR_SHIFT (11) +#define ENB_SR1INTR_MASK (0x1) +#define ENB_SR1INTR_SHIFT (5) +#define ENB_ERR_INTERRUPTS_MASK (0x1E) +#define ENB_SR1UXPERR_MASK (0x1) +#define ENB_SR1UXPERR_SHIFT (4) +#define ENB_SR1SNERR_MASK (0x1) +#define ENB_SR1SNERR_SHIFT (3) +#define ENB_SR1TIMEOUT_MASK (0x1) +#define ENB_SR1TIMEOUT_SHIFT (2) +#define ENB_SR0RDERR_MASK (0x1) +#define ENB_SR0RDERR_SHIFT (1) +#define ENB_ACPM_COMM_MASK (0x1) +#define ENB_ACPM_COMM_SHIFT (0) + +/* HIU Top Level Control 2 Register */ +#define HIUTOPCTL2 (0xE04) +#define SEQNUM_MASK (0x7) +#define SEQNUM_SHIFT (26) +#define LIMITDVFS_MASK (0x3F) +#define LIMITDVFS_SHIFT (14) +#define REQDVFS_MASK (0x3F) +#define REQDVFS_SHIFT (8) +#define REQPBL_MASK (0xFF) +#define REQPBL_SHIFT (0) + +/* HIU Turbo Boost Control Register */ +#define HIUTBCTL (0xE10) +#define BOOSTED_MASK (0x1) +#define BOOSTED_SHIFT (31) +#define B3_INC_MASK (0x3) +#define B3_INC_SHIFT (20) +#define B2_INC_MASK (0x3) +#define B2_INC_SHIFT (18) +#define B1_INC_MASK (0x3) +#define B1_INC_SHIFT (16) +#define TBDVFS_MASK (0x3F) +#define TBDVFS_SHIFT (8) +#define PC_DISABLE_MASK (0x1) +#define PC_DISABLE_SHIFT (1) +#define TB_ENB_MASK (0x1) +#define TB_ENB_SHIFT (0) + +/* HIU Turbo Boost Power State Config Registers */ +#define HIUTBPSCFG_BASE (0xE14) +#define HIUTBPSCFG_OFFSET (0x4) +#define HIUTBPSCFG_MASK (0x1 << 31 | 0x7 << 28 | 0xFF << 20 | 0xFFFF << 0x0) +#define PB_MASK (0x1) +#define PB_SHIFT (31) +#define BL_MASK (0x7) +#define BL_SHIFT (28) +#define PBL_MASK (0xFF) +#define PBL_SHIFT (20) +#define TBPWRTHRESH_INC_MASK (0xFFFF) +#define TBPWRTHRESH_INC_SHIFT (0x0) + +/* HIU Turbo Boost Power Threshold Register */ +#define HIUTBPWRTHRESH1 (0xE34) +#define HIUTBPWRTHRESH2 (0xE38) +#define HIUTBPWRTHRESH3 (0xE3C) +#define R_MASK (0x3F) +#define R_SHIFT (24) +#define MONINTERVAL_MASK (0xF) +#define MONINTERVAL_SHIFT (20) +#define TBPWRTHRESH_EXP_MASK (0xF) +#define TBPWRTHRESH_EXP_SHIFT (16) +#define TBPWRTHRESH_FRAC_MASK (0xFFFF) +#define TBPWRTHRESH_FRAC_SHIFT (0) + +struct hiu_stats { + unsigned int last_level; + unsigned int *freq_table; + unsigned long long *time_in_state; +}; + +struct hiu_tasklet_data { + unsigned int index; +}; + +struct hiu_cfg { + unsigned int power_borrowed; + unsigned int boost_level; + unsigned int power_budget_limit; + unsigned int power_threshold_inc; +}; + +struct exynos_hiu_data { + bool enabled; + bool pc_enabled; + bool tb_enabled; + bool hwidvfs_done; + bool boosting_activated; + + int operation_mode; + + int irq; + struct work_struct work; + struct mutex lock; + wait_queue_head_t hwidvfs_wait; + wait_queue_head_t polling_wait; + + struct cpumask cpus; + unsigned int cpu; + + unsigned int cur_budget; + + unsigned int cur_freq; + unsigned int clipped_freq; + unsigned int boost_threshold; + unsigned int boost_max; + unsigned int level_offset; + unsigned int sw_pbl; + + void __iomem * base; + + struct device_node * dn; + struct hiu_stats * stats; +}; + +#ifdef CONFIG_EXYNOS_HIU +extern int exynos_hiu_set_freq(unsigned int id, unsigned int req_freq); +extern int 
exynos_hiu_get_freq(unsigned int id);
+extern int exynos_hiu_get_max_freq(void);
+#else
+static inline int exynos_hiu_set_freq(unsigned int id, unsigned int req_freq) { return 0; }
+static inline int exynos_hiu_get_freq(unsigned int id) { return 0; }
+static inline int exynos_hiu_get_max_freq(void) { return 0; }
+#endif
+
+#endif
--
2.20.1
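
Usage sketch (illustrative, not part of the diff): a minimal example of how a
big-cluster cpufreq driver callback might drive the API exported by
exynos-hiu.h. Only exynos_hiu_set_freq(), exynos_hiu_get_freq() and
exynos_hiu_get_max_freq() come from this patch; the function name, the domain
id constant and the clamping policy below are assumptions.

/* Illustrative caller only; the exynos_hiu_*() API comes from exynos-hiu.h. */
#include "exynos-hiu.h"

#define HIU_BIG_DOMAIN_ID	0	/* assumed value for the (unused) 'id' argument */

static int big_cluster_request_freq(unsigned int target_khz)
{
	int max_khz;

	/* HIU may clip the reachable maximum while boost is being throttled */
	max_khz = exynos_hiu_get_max_freq();
	if (max_khz > 0 && target_khz > (unsigned int)max_khz)
		target_khz = max_khz;

	/*
	 * In POLLING_MODE this normally blocks until the SR1 response to the
	 * SR0 request is observed; see exynos_hiu_set_freq().
	 */
	return exynos_hiu_set_freq(HIU_BIG_DOMAIN_ID, target_khz);
}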