--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS - HIU(Hardware Intervention Unit) support
+ * Author : PARK CHOONGHOON (choong.park@samsung.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/regmap.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+#include "exynos-hiu.h"
+#include "../../cpufreq/exynos-ff.h"
+#include "../../cpufreq/exynos-acme.h"
+
+static struct exynos_hiu_data *data;
+
+static void hiu_stats_create_table(struct cpufreq_policy *policy);
+
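+/* Base polling interval in microseconds, used with usleep_range() while waiting on SR1 */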
+#define POLL_PERIOD 100
+
+/****************************************************************/
+/* HIU HELPER FUNCTION */
+/****************************************************************/
+static int hiu_get_freq_level(unsigned int freq)
+{
+ int level;
+ struct hiu_stats *stats = data->stats;
+
+ if (unlikely(!stats))
+ return 0;
+
+ for (level = 0; level < stats->last_level; level++)
+ if (stats->freq_table[level] == freq)
+ return level + data->level_offset;
+
+ return -EINVAL;
+}
+
+static int hiu_get_power_budget(unsigned int freq)
+{
+	/* The power budget is currently a fixed value taken from the "sw-pbl" DT property */
+	return data->sw_pbl;
+}
+
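+/* Read-modify-write helper: clears (mask << shift) in the SFR at offset, then ORs in (val << shift) */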
+static void hiu_update_reg(int offset, int mask, int shift, unsigned int val)
+{
+ unsigned int reg_val;
+
+ reg_val = __raw_readl(data->base + offset);
+ reg_val &= ~(mask << shift);
+ reg_val |= val << shift;
+ __raw_writel(reg_val, data->base + offset);
+}
+
+static unsigned int hiu_read_reg(int offset, int mask, int shift)
+{
+ unsigned int reg_val;
+
+ reg_val = __raw_readl(data->base + offset);
+ return (reg_val >> shift) & mask;
+}
+
+static unsigned int hiu_get_act_dvfs(void)
+{
+ return hiu_read_reg(HIUTOPCTL1, ACTDVFS_MASK, ACTDVFS_SHIFT);
+}
+
+static void hiu_control_err_interrupts(int enable)
+{
+ if (enable)
+ hiu_update_reg(HIUTOPCTL1, ENB_ERR_INTERRUPTS_MASK, 0, ENB_ERR_INTERRUPTS_MASK);
+ else
+ hiu_update_reg(HIUTOPCTL1, ENB_ERR_INTERRUPTS_MASK, 0, 0);
+}
+
+static void hiu_control_mailbox(int enable)
+{
+ hiu_update_reg(HIUTOPCTL1, ENB_SR1INTR_MASK, ENB_SR1INTR_SHIFT, !!enable);
+ hiu_update_reg(HIUTOPCTL1, ENB_ACPM_COMM_MASK, ENB_ACPM_COMM_SHIFT, !!enable);
+}
+
+static void hiu_set_limit_dvfs(unsigned int freq)
+{
+	int level;
+
+	level = hiu_get_freq_level(freq);
+	if (level < 0)
+		return;
+
+	hiu_update_reg(HIUTOPCTL2, LIMITDVFS_MASK, LIMITDVFS_SHIFT, level);
+}
+
+static void hiu_set_tb_dvfs(unsigned int freq)
+{
+	int level;
+
+	level = hiu_get_freq_level(freq);
+	if (level < 0)
+		return;
+
+	hiu_update_reg(HIUTBCTL, TBDVFS_MASK, TBDVFS_SHIFT, level);
+}
+
+static void hiu_control_tb(int enable)
+{
+ hiu_update_reg(HIUTBCTL, TB_ENB_MASK, TB_ENB_SHIFT, !!enable);
+}
+
+static void hiu_control_pc(int enable)
+{
+ hiu_update_reg(HIUTBCTL, PC_DISABLE_MASK, PC_DISABLE_SHIFT, !enable);
+}
+
+static void hiu_set_boost_level_inc(void)
+{
+ unsigned int inc;
+ struct device_node *dn = data->dn;
+
+ if (!of_property_read_u32(dn, "bl1-inc", &inc))
+ hiu_update_reg(HIUTBCTL, B1_INC_MASK, B1_INC_SHIFT, inc);
+ if (!of_property_read_u32(dn, "bl2-inc", &inc))
+ hiu_update_reg(HIUTBCTL, B2_INC_MASK, B2_INC_SHIFT, inc);
+ if (!of_property_read_u32(dn, "bl3-inc", &inc))
+ hiu_update_reg(HIUTBCTL, B3_INC_MASK, B3_INC_SHIFT, inc);
+}
+
+static void hiu_set_tb_ps_cfg_each(int index, unsigned int cfg_val)
+{
+ int offset;
+
+ offset = HIUTBPSCFG_BASE + index * HIUTBPSCFG_OFFSET;
+ hiu_update_reg(offset, HIUTBPSCFG_MASK, 0, cfg_val);
+}
+
+static int hiu_set_tb_ps_cfg(void)
+{
+	int size, index, ret;
+ unsigned int val;
+ struct hiu_cfg *table;
+ struct device_node *dn = data->dn;
+
+	size = of_property_count_u32_elems(dn, "config-table");
+	if (size < 0)
+		return size;
+
+	/* config-table entries are 4 u32 cells each (see struct hiu_cfg) */
+	if (size % 4)
+		return -EINVAL;
+
+	table = kcalloc(size / 4, sizeof(struct hiu_cfg), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(dn, "config-table", (unsigned int *)table, size);
+	if (ret) {
+		kfree(table);
+		return ret;
+	}
+
+ for (index = 0; index < size / 4; index++) {
+ val = 0;
+ val |= table[index].power_borrowed << PB_SHIFT;
+ val |= table[index].boost_level << BL_SHIFT;
+ val |= table[index].power_budget_limit << PBL_SHIFT;
+ val |= table[index].power_threshold_inc << TBPWRTHRESH_INC_SHIFT;
+
+ hiu_set_tb_ps_cfg_each(index, val);
+ }
+
+ kfree(table);
+
+ return 0;
+}
+
+static bool check_hiu_sr1_irq_pending(void)
+{
+ return !!hiu_read_reg(HIUTOPCTL1, HIU_MBOX_RESPONSE_MASK, SR1INTR_SHIFT);
+}
+
+static void clear_hiu_sr1_irq_pending(void)
+{
+ hiu_update_reg(HIUTOPCTL1, HIU_MBOX_RESPONSE_MASK, SR1INTR_SHIFT, 0);
+}
+
+static bool check_hiu_mailbox_err_pending(void)
+{
+ return !!hiu_read_reg(HIUTOPCTL1, HIU_MBOX_ERR_MASK, HIU_MBOX_ERR_SHIFT);
+}
+
+static unsigned int get_hiu_mailbox_err(void)
+{
+ return hiu_read_reg(HIUTOPCTL1, HIU_MBOX_ERR_MASK, HIU_MBOX_ERR_SHIFT);
+}
+
+static void hiu_mailbox_err_handler(void)
+{
+ unsigned int err, val;
+
+ err = get_hiu_mailbox_err();
+
+	if (err & SR1UXPERR_MASK)
+		pr_err("exynos-hiu: unexpected error occurred\n");
+
+	if (err & SR1SNERR_MASK) {
+		val = __raw_readl(data->base + HIUTOPCTL2);
+		val = (val >> SEQNUM_SHIFT) & SEQNUM_MASK;
+		pr_err("exynos-hiu: erroneous sequence number %u\n", val);
+	}
+
+ if (err & SR1TIMEOUT_MASK)
+ pr_err("exynos-hiu: TIMEOUT on SR1 write\n");
+
+ if (err & SR0RDERR_MASK)
+ pr_err("exynos-hiu: SR0 read twice or more\n");
+}
+
+static bool check_hiu_req_freq_updated(unsigned int req_freq)
+{
+ unsigned int cur_level, cur_freq;
+
+ cur_level = hiu_get_act_dvfs();
+ cur_freq = data->stats->freq_table[cur_level - data->level_offset];
+
+	/*
+	 * If req_freq == boost_threshold, the HIU may request turbo boost on
+	 * its own. In that case the requested frequency update is considered
+	 * done once the acting frequency is greater than or equal to the
+	 * boost threshold.
+	 */
+ if (req_freq == data->boost_threshold)
+ return cur_freq >= data->boost_threshold;
+
+ return cur_freq == req_freq;
+}
+
+static bool check_hiu_normal_req_done(unsigned int req_freq)
+{
+ return check_hiu_sr1_irq_pending() &&
+ check_hiu_req_freq_updated(req_freq);
+}
+
+static bool check_hiu_need_register_restore(void)
+{
+ return !hiu_read_reg(HIUTOPCTL1, ENB_SR1INTR_MASK, ENB_SR1INTR_SHIFT);
+}
+
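+/*
+ * Issue a DVFS request over the HIU mailbox: the SR0 request is made by
+ * writing the requested level (REQDVFS) and power budget (REQPBL) into
+ * HIUTOPCTL2. Completion is observed via the SR1 pending bit.
+ */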
+static int request_dvfs_on_sr0(unsigned int req_freq)
+{
+	unsigned int val;
+	int level, budget;
+
+ /* Get dvfs level */
+ level = hiu_get_freq_level(req_freq);
+ if (level < 0)
+ return -EINVAL;
+
+ /* Get power budget */
+ budget = hiu_get_power_budget(req_freq);
+ if (budget < 0)
+ return -EINVAL;
+
+ /* write REQDVFS & REQPBL to HIU SFR */
+ val = __raw_readl(data->base + HIUTOPCTL2);
+ val &= ~(REQDVFS_MASK << REQDVFS_SHIFT | REQPBL_MASK << REQPBL_SHIFT);
+ val |= (level << REQDVFS_SHIFT | budget << REQPBL_SHIFT);
+ __raw_writel(val, data->base + HIUTOPCTL2);
+
+ return 0;
+}
+
+/****************************************************************/
+/* HIU API */
+/****************************************************************/
+static int hiu_sr1_check_loop(void *unused);
+static void __exynos_hiu_update_data(struct cpufreq_policy *policy);
+
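+/*
+ * Request a frequency change through the HIU. Illustrative (hypothetical)
+ * call site in a cpufreq driver; the id argument is currently unused:
+ *
+ *	ret = exynos_hiu_set_freq(domain_id, target_freq);
+ *	if (ret)
+ *		pr_err("HIU DVFS request failed: %d\n", ret);
+ */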
+int exynos_hiu_set_freq(unsigned int id, unsigned int req_freq)
+{
+ bool need_update_cur_freq = true;
+
+ if (unlikely(!data))
+ return -ENODEV;
+
+ if (!data->enabled)
+ return -ENODEV;
+
+	pr_debug("exynos-hiu: update data->cur_freq:%u\n", data->cur_freq);
+
+ mutex_lock(&data->lock);
+
+ if (check_hiu_need_register_restore())
+ __exynos_hiu_update_data(NULL);
+
+ /* PM QoS could make req_freq bigger than boost_threshold */
+	if (req_freq >= data->boost_threshold) {
+		/*
+		 * 1) If turbo boost is already activated,
+		 *    just update cur_freq and return.
+		 * 2) If not, clamp req_freq to boost_threshold;
+		 *    DO NOT allow req_freq to exceed boost_threshold.
+		 */
+		if (data->cur_freq >= data->boost_threshold) {
+			data->cur_freq = req_freq;
+			mutex_unlock(&data->lock);
+			return 0;
+		} else {
+			data->cur_freq = req_freq;
+			req_freq = data->boost_threshold;
+			need_update_cur_freq = false;
+		}
+	}
+
+ /* Write req_freq on SR0 to request DVFS */
+ request_dvfs_on_sr0(req_freq);
+
+ if (data->operation_mode == POLLING_MODE) {
+ while (!check_hiu_normal_req_done(req_freq) &&
+ !check_hiu_mailbox_err_pending())
+ usleep_range(POLL_PERIOD, 2 * POLL_PERIOD);
+
+ if (check_hiu_mailbox_err_pending()) {
+ hiu_mailbox_err_handler();
+			BUG();
+ }
+
+ if (need_update_cur_freq)
+ data->cur_freq = req_freq;
+ clear_hiu_sr1_irq_pending();
+
+ if (req_freq == data->boost_threshold && !data->boosting_activated) {
+ data->boosting_activated = true;
+ wake_up(&data->polling_wait);
+ }
+ }
+
+ mutex_unlock(&data->lock);
+
+ pr_debug("exynos-hiu: set REQDVFS to HIU : %ukHz\n", req_freq);
+
+ return 0;
+}
+
+int exynos_hiu_get_freq(unsigned int id)
+{
+ if (unlikely(!data))
+ return -ENODEV;
+
+ return data->cur_freq;
+}
+
+int exynos_hiu_get_max_freq(void)
+{
+ if (unlikely(!data))
+ return -1;
+
+ return data->clipped_freq;
+}
+
+/****************************************************************/
+/* HIU SR1 WRITE HANDLER */
+/****************************************************************/
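+/*
+ * Work callback scheduled from the SR1 interrupt handler or the polling
+ * thread when the HIU reports a HW-initiated frequency change: it reads
+ * the acting boost frequency and forwards it to cpufreq so the governor's
+ * view matches what the hardware actually set.
+ */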
+static void exynos_hiu_work(struct work_struct *work)
+{
+ int cpu;
+ unsigned int boost_freq, level;
+ struct cpufreq_policy *policy;
+ struct hiu_stats *stats = data->stats;
+ struct cpumask mask;
+
+ cpumask_and(&mask, &data->cpus, cpu_online_mask);
+ if (cpumask_empty(&mask)) {
+ pr_debug("exynos-hiu: all cores in big cluster off\n");
+ goto done;
+ }
+
+	/* Check that the current CPU belongs to data->cpus */
+ cpu = smp_processor_id();
+ if (!cpumask_test_cpu(cpu, &mask)) {
+ pr_debug("exynos-hiu: work task is moved to cpu%d\n", cpu);
+ goto done;
+ }
+
+ level = hiu_get_act_dvfs();
+ boost_freq = stats->freq_table[level - data->level_offset];
+
+	/*
+	 * This work callback is scheduled only when the TB bit is set.
+	 * However, turbo boost may be released while the callback waits to
+	 * run, so the acting frequency can be lower than the turbo boost
+	 * threshold. Handle that case here.
+	 */
+ if (boost_freq < data->boost_threshold)
+ goto done;
+
+ policy = cpufreq_cpu_get(cpumask_first(&mask));
+ if (!policy) {
+		pr_debug("exynos-hiu: failed to get cpufreq policy in HIU work\n");
+ goto done;
+ }
+
+ __cpufreq_driver_target(policy, boost_freq,
+ CPUFREQ_RELATION_H | CPUFREQ_HW_DVFS_REQ);
+
+ cpufreq_cpu_put(policy);
+done:
+ data->hwidvfs_done = true;
+ wake_up(&data->hwidvfs_wait);
+}
+
+static irqreturn_t exynos_hiu_irq_handler(int irq, void *id)
+{
+ schedule_work_on(data->cpu, &data->work);
+
+ return IRQ_HANDLED;
+}
+
+static bool hiu_need_hw_request(void)
+{
+ unsigned int cur_level, cur_freq;
+
+ cur_level = hiu_get_act_dvfs();
+ cur_freq = data->stats->freq_table[cur_level - data->level_offset];
+
+ return cur_freq >= data->boost_threshold;
+}
+
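+/*
+ * Polling-mode counterpart of the SR1 interrupt: a kthread that sleeps until
+ * turbo boosting is activated, then polls the SR1 pending bit and schedules
+ * the work above whenever the HIU raises the frequency on its own.
+ */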
+static int hiu_sr1_check_loop(void *unused)
+{
+wait:
+ wait_event(data->polling_wait, data->boosting_activated);
+poll:
+ mutex_lock(&data->lock);
+
+ if (data->cur_freq < data->boost_threshold)
+ goto done;
+
+ while (!check_hiu_sr1_irq_pending() &&
+ !check_hiu_mailbox_err_pending()) {
+ mutex_unlock(&data->lock);
+ usleep_range(POLL_PERIOD, 2 * POLL_PERIOD);
+ mutex_lock(&data->lock);
+
+ if (data->cur_freq < data->boost_threshold)
+ goto done;
+ }
+
+ if (check_hiu_mailbox_err_pending()) {
+ hiu_mailbox_err_handler();
+		BUG();
+ }
+
+ if (hiu_need_hw_request()) {
+ schedule_work_on(data->cpu, &data->work);
+ clear_hiu_sr1_irq_pending();
+ mutex_unlock(&data->lock);
+
+ wait_event(data->hwidvfs_wait, data->hwidvfs_done);
+ data->hwidvfs_done = false;
+
+ goto poll;
+ }
+
+done:
+ data->boosting_activated = false;
+ mutex_unlock(&data->lock);
+ goto wait;
+
+ /* NEVER come here */
+
+ return 0;
+}
+
+/****************************************************************/
+/* EXTERNAL EVENT HANDLER */
+/****************************************************************/
+static void __exynos_hiu_update_data(struct cpufreq_policy *policy)
+{
+	/*
+	 * Explicitly disable the whole HW block first,
+	 * e.g. hiu_control_pc(0), hiu_control_tb(0), hiu_control_mailbox(0).
+	 */
+
+ /* Set dvfs limit and TB threshold */
+ hiu_set_limit_dvfs(data->clipped_freq);
+ hiu_set_tb_dvfs(data->boost_threshold);
+
+ /* Initialize TB level offset */
+ hiu_set_boost_level_inc();
+
+ /* Initialize TB power state config */
+ hiu_set_tb_ps_cfg();
+
+ /* Enable TB */
+ hiu_control_pc(data->pc_enabled);
+ hiu_control_tb(data->tb_enabled);
+
+ /* Enable error interrupts */
+ hiu_control_err_interrupts(1);
+ /* Enable mailbox communication with ACPM */
+ hiu_control_mailbox(1);
+}
+
+static int exynos_hiu_update_data(struct cpufreq_policy *policy)
+{
+ if (!cpumask_test_cpu(data->cpu, policy->cpus))
+ return 0;
+
+ data->boost_max = policy->user_policy.max;
+ data->clipped_freq = data->boost_max;
+ hiu_stats_create_table(policy);
+
+ __exynos_hiu_update_data(policy);
+
+ data->enabled = true;
+
+ pr_info("exynos-hiu: HIU data structure update complete\n");
+
+ return 0;
+}
+
+static struct exynos_cpufreq_ready_block exynos_hiu_ready = {
+ .update = exynos_hiu_update_data,
+};
+
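+/* Throttling is needed while the current frequency exceeds both the boost threshold and the new clipped (policy max) frequency */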
+static bool check_hiu_need_boost_thrott(void)
+{
+ return data->cur_freq > data->boost_threshold &&
+ data->cur_freq > data->clipped_freq;
+}
+
+static int exynos_hiu_policy_callback(struct notifier_block *nb,
+ unsigned long event, void *info)
+{
+ struct cpufreq_policy *policy = info;
+
+ if (policy->cpu != data->cpu)
+ return NOTIFY_DONE;
+
+ if (policy->max == data->clipped_freq)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case CPUFREQ_NOTIFY:
+
+ /* Note : MUST write LIMIT_DVFS to HIU SFR */
+ mutex_lock(&data->lock);
+ if (policy->max >= data->boost_threshold) {
+ data->clipped_freq = policy->max;
+ hiu_set_limit_dvfs(data->clipped_freq);
+ }
+ mutex_unlock(&data->lock);
+
+		pr_debug("exynos-hiu: update clipped freq:%u\n", data->clipped_freq);
+		if (check_hiu_need_boost_thrott())
+			atomic_inc(&boost_throttling);
+		break;
+	default:
+		break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_hiu_policy_notifier = {
+ .notifier_call = exynos_hiu_policy_callback,
+};
+
+static int exynos_hiu_transition_callback(struct notifier_block *nb,
+ unsigned long event, void *info)
+{
+ struct cpufreq_freqs *freq = info;
+ int cpu = freq->cpu;
+
+ if (cpu != data->cpu)
+ return NOTIFY_DONE;
+
+ if (event != CPUFREQ_POSTCHANGE)
+ return NOTIFY_DONE;
+
+ if (atomic_read(&boost_throttling) &&
+ data->cur_freq <= data->clipped_freq) {
+ atomic_dec(&boost_throttling);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_hiu_transition_notifier = {
+ .notifier_call = exynos_hiu_transition_callback,
+ .priority = INT_MIN,
+};
+
+/****************************************************************/
+/* SYSFS INTERFACE */
+/****************************************************************/
+static ssize_t
+hiu_enable_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", data->enabled);
+}
+
+static ssize_t
+hiu_enable_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtou32(buf, 10, &input))
+		return -EINVAL;
+
+	/* Input is validated but not applied; enable/disable control is not implemented yet */
+	return count;
+}
+
+static DEVICE_ATTR(enabled, 0644, hiu_enable_show, hiu_enable_store);
+
+static struct attribute *exynos_hiu_attrs[] = {
+ &dev_attr_enabled.attr,
+ NULL,
+};
+
+static struct attribute_group exynos_hiu_attr_group = {
+ .name = "hiu",
+ .attrs = exynos_hiu_attrs,
+};
+
+/****************************************************************/
+/* INITIALIZE EXYNOS HIU DRIVER */
+/****************************************************************/
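+/*
+ * Example of the DT node consumed by hiu_dt_parsing() below. The values are
+ * illustrative only; board-specific numbers here are assumptions, not real
+ * platform data:
+ *
+ *	hiu {
+ *		compatible = "samsung,exynos-hiu";
+ *		operation-mode = <1>;		(INTERRUPT_MODE)
+ *		boot-freq = <2080000>;
+ *		boost-threshold = <2496000>;
+ *		sw-pbl = <0xff>;
+ *		sibling-cpus = "4-7";
+ *		pc-enabled;
+ *		tb-enabled;
+ *	};
+ */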
+static int hiu_dt_parsing(struct device_node *dn)
+{
+ const char *buf;
+ int ret = 0;
+
+ ret |= of_property_read_u32(dn, "operation-mode", &data->operation_mode);
+ ret |= of_property_read_u32(dn, "boot-freq", &data->cur_freq);
+ ret |= of_property_read_u32(dn, "boost-threshold", &data->boost_threshold);
+ ret |= of_property_read_u32(dn, "sw-pbl", &data->sw_pbl);
+ ret |= of_property_read_string(dn, "sibling-cpus", &buf);
+ if (ret)
+ return ret;
+
+	data->pc_enabled = of_property_read_bool(dn, "pc-enabled");
+	data->tb_enabled = of_property_read_bool(dn, "tb-enabled");
+
+	ret = cpulist_parse(buf, &data->cpus);
+	if (ret)
+		return ret;
+
+	cpumask_and(&data->cpus, &data->cpus, cpu_possible_mask);
+ if (cpumask_weight(&data->cpus) == 0)
+ return -ENODEV;
+
+ data->cpu = cpumask_first(&data->cpus);
+
+ return 0;
+}
+
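+/*
+ * Build the HIU frequency table from the cpufreq table. freq_table and
+ * time_in_state share one allocation: count u32 frequencies followed by
+ * count u64 counters.
+ */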
+static void hiu_stats_create_table(struct cpufreq_policy *policy)
+{
+ unsigned int i = 0, count = 0, alloc_size;
+ struct hiu_stats *stats;
+ struct cpufreq_frequency_table *pos, *table;
+
+ table = policy->freq_table;
+ if (unlikely(!table))
+ return;
+
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return;
+
+ cpufreq_for_each_valid_entry(pos, table)
+ count++;
+
+ alloc_size = count * (sizeof(unsigned int) + sizeof(u64));
+
+ stats->freq_table = kzalloc(alloc_size, GFP_KERNEL);
+ if (!stats->freq_table)
+ goto free_stat;
+
+ stats->time_in_state = (unsigned long long *)(stats->freq_table + count);
+
+ stats->last_level = count;
+
+ cpufreq_for_each_valid_entry(pos, table)
+ stats->freq_table[i++] = pos->frequency;
+
+ data->stats = stats;
+
+	/* level_offset is the driver_data of the first valid frequency table entry */
+	cpufreq_for_each_valid_entry(pos, table) {
+		data->level_offset = pos->driver_data;
+		break;
+	}
+
+ return;
+free_stat:
+ kfree(stats);
+}
+
+static int exynos_hiu_probe(struct platform_device *pdev)
+{
+ struct task_struct *polling_thread;
+ struct device_node *dn = pdev->dev.of_node;
+ int ret;
+
+ data = kzalloc(sizeof(struct exynos_hiu_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->lock);
+
+ platform_set_drvdata(pdev, data);
+
+	data->base = ioremap(GCU_BASE, SZ_4K);
+	if (!data->base) {
+		kfree(data);
+		return -ENOMEM;
+	}
+
+	ret = hiu_dt_parsing(dn);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to parse HIU data\n");
+		iounmap(data->base);
+		kfree(data);
+		return -ENODEV;
+	}
+
+ data->dn = dn;
+
+ if (data->operation_mode == INTERRUPT_MODE) {
+ data->irq = irq_of_parse_and_map(dn, 0);
+ if (data->irq <= 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, data->irq, exynos_hiu_irq_handler,
+ IRQF_TRIGGER_RISING, dev_name(&pdev->dev), data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request IRQ handler: %d\n", data->irq);
+ return -ENODEV;
+ }
+ } else {
+ init_waitqueue_head(&data->polling_wait);
+ data->boosting_activated = false;
+
+		polling_thread = kthread_create(hiu_sr1_check_loop, NULL, "hiu_polling");
+		if (IS_ERR(polling_thread))
+			return PTR_ERR(polling_thread);
+		kthread_bind_mask(polling_thread, cpu_coregroup_mask(0));
+		wake_up_process(polling_thread);
+ }
+
+ cpufreq_register_notifier(&exynos_hiu_policy_notifier, CPUFREQ_POLICY_NOTIFIER);
+ cpufreq_register_notifier(&exynos_hiu_transition_notifier, CPUFREQ_TRANSITION_NOTIFIER);
+
+ INIT_WORK(&data->work, exynos_hiu_work);
+ init_waitqueue_head(&data->hwidvfs_wait);
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &exynos_hiu_attr_group);
+ if (ret)
+		dev_err(&pdev->dev, "Failed to create Exynos HIU attr group\n");
+
+ exynos_cpufreq_ready_list_add(&exynos_hiu_ready);
+
+ dev_info(&pdev->dev, "HIU Handler initialization complete\n");
+ return 0;
+}
+
+static int exynos_hiu_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /* HACK : disable turbo boost */
+ return 0;
+}
+
+static int exynos_hiu_resume(struct platform_device *pdev)
+{
+ /* HACK : enable turbo boost */
+ return 0;
+}
+
+static const struct of_device_id of_exynos_hiu_match[] = {
+ { .compatible = "samsung,exynos-hiu", },
+ { },
+};
+
+static const struct platform_device_id exynos_hiu_ids[] = {
+ { "exynos-hiu", },
+ { }
+};
+
+static struct platform_driver exynos_hiu_driver = {
+ .driver = {
+ .name = "exynos-hiu",
+ .owner = THIS_MODULE,
+ .of_match_table = of_exynos_hiu_match,
+ },
+ .probe = exynos_hiu_probe,
+ .suspend = exynos_hiu_suspend,
+ .resume = exynos_hiu_resume,
+ .id_table = exynos_hiu_ids,
+};
+
+static int __init exynos_hiu_init(void)
+{
+ return platform_driver_register(&exynos_hiu_driver);
+}
+arch_initcall(exynos_hiu_init);
--- /dev/null
+#ifndef __EXYNOS_HIU_H__
+#define __EXYNOS_HIU_H__
+
+#include <linux/cpufreq.h>
+#include <linux/interrupt.h>
+
+/* GCU (HIU) base address */
+#define GCU_BASE (0x1E4C0000)
+
+/* SR1 write monitoring operation mode */
+#define POLLING_MODE (0)
+#define INTERRUPT_MODE (1)
+
+/* GCU Control Register */
+#define GCUCTL (0x22C)
+#define HIUINTR_EN_MASK (0x1)
+#define HIUINTR_EN_SHIFT (3)
+#define HIUERR_EN_MASK (0x1)
+#define HIUERR_EN_SHIFT (2)
+
+/* GCU ERROR Register */
+#define GCUERR (0x26C)
+#define HIUINTR_MASK (0x1)
+#define HIUINTR_SHIFT (3)
+#define HIUERR_MASK (0x1)
+#define HIUERR_SHIFT (2)
+
+/* HIU Top Level Control 1 Register */
+#define HIUTOPCTL1 (0xE00)
+#define ACTDVFS_MASK (0x3F)
+#define ACTDVFS_SHIFT (24)
+#define HIU_MBOX_RESPONSE_MASK (0x1)
+#define SR1INTR_SHIFT (15)
+#define SR1UXPERR_MASK (1 << 3)
+#define SR1SNERR_MASK (1 << 2)
+#define SR1TIMEOUT_MASK (1 << 1)
+#define SR0RDERR_MASK (1 << 0)
+#define HIU_MBOX_ERR_MASK (0xF)
+#define HIU_MBOX_ERR_SHIFT (11)
+#define ENB_SR1INTR_MASK (0x1)
+#define ENB_SR1INTR_SHIFT (5)
+#define ENB_ERR_INTERRUPTS_MASK (0x1E)
+#define ENB_SR1UXPERR_MASK (0x1)
+#define ENB_SR1UXPERR_SHIFT (4)
+#define ENB_SR1SNERR_MASK (0x1)
+#define ENB_SR1SNERR_SHIFT (3)
+#define ENB_SR1TIMEOUT_MASK (0x1)
+#define ENB_SR1TIMEOUT_SHIFT (2)
+#define ENB_SR0RDERR_MASK (0x1)
+#define ENB_SR0RDERR_SHIFT (1)
+#define ENB_ACPM_COMM_MASK (0x1)
+#define ENB_ACPM_COMM_SHIFT (0)
+
+/* HIU Top Level Control 2 Register */
+#define HIUTOPCTL2 (0xE04)
+#define SEQNUM_MASK (0x7)
+#define SEQNUM_SHIFT (26)
+#define LIMITDVFS_MASK (0x3F)
+#define LIMITDVFS_SHIFT (14)
+#define REQDVFS_MASK (0x3F)
+#define REQDVFS_SHIFT (8)
+#define REQPBL_MASK (0xFF)
+#define REQPBL_SHIFT (0)
+
+/* HIU Turbo Boost Control Register */
+#define HIUTBCTL (0xE10)
+#define BOOSTED_MASK (0x1)
+#define BOOSTED_SHIFT (31)
+#define B3_INC_MASK (0x3)
+#define B3_INC_SHIFT (20)
+#define B2_INC_MASK (0x3)
+#define B2_INC_SHIFT (18)
+#define B1_INC_MASK (0x3)
+#define B1_INC_SHIFT (16)
+#define TBDVFS_MASK (0x3F)
+#define TBDVFS_SHIFT (8)
+#define PC_DISABLE_MASK (0x1)
+#define PC_DISABLE_SHIFT (1)
+#define TB_ENB_MASK (0x1)
+#define TB_ENB_SHIFT (0)
+
+/* HIU Turbo Boost Power State Config Registers */
+#define HIUTBPSCFG_BASE (0xE14)
+#define HIUTBPSCFG_OFFSET (0x4)
+#define HIUTBPSCFG_MASK			(0x1U << 31 | 0x7 << 28 | 0xFF << 20 | 0xFFFF)
+#define PB_MASK (0x1)
+#define PB_SHIFT (31)
+#define BL_MASK (0x7)
+#define BL_SHIFT (28)
+#define PBL_MASK (0xFF)
+#define PBL_SHIFT (20)
+#define TBPWRTHRESH_INC_MASK (0xFFFF)
+#define TBPWRTHRESH_INC_SHIFT (0x0)
+
+/* HIU Turbo Boost Power Threshold Register */
+#define HIUTBPWRTHRESH1 (0xE34)
+#define HIUTBPWRTHRESH2 (0xE38)
+#define HIUTBPWRTHRESH3 (0xE3C)
+#define R_MASK (0x3F)
+#define R_SHIFT (24)
+#define MONINTERVAL_MASK (0xF)
+#define MONINTERVAL_SHIFT (20)
+#define TBPWRTHRESH_EXP_MASK (0xF)
+#define TBPWRTHRESH_EXP_SHIFT (16)
+#define TBPWRTHRESH_FRAC_MASK (0xFFFF)
+#define TBPWRTHRESH_FRAC_SHIFT (0)
+
+struct hiu_stats {
+ unsigned int last_level;
+ unsigned int *freq_table;
+ unsigned long long *time_in_state;
+};
+
+struct hiu_tasklet_data {
+ unsigned int index;
+};
+
+struct hiu_cfg {
+ unsigned int power_borrowed;
+ unsigned int boost_level;
+ unsigned int power_budget_limit;
+ unsigned int power_threshold_inc;
+};
+
+struct exynos_hiu_data {
+ bool enabled;
+ bool pc_enabled;
+ bool tb_enabled;
+ bool hwidvfs_done;
+ bool boosting_activated;
+
+ int operation_mode;
+
+ int irq;
+ struct work_struct work;
+ struct mutex lock;
+ wait_queue_head_t hwidvfs_wait;
+ wait_queue_head_t polling_wait;
+
+ struct cpumask cpus;
+ unsigned int cpu;
+
+ unsigned int cur_budget;
+
+ unsigned int cur_freq;
+ unsigned int clipped_freq;
+ unsigned int boost_threshold;
+ unsigned int boost_max;
+ unsigned int level_offset;
+ unsigned int sw_pbl;
+
+	void __iomem *base;
+
+	struct device_node *dn;
+	struct hiu_stats *stats;
+};
+
+#ifdef CONFIG_EXYNOS_HIU
+extern int exynos_hiu_set_freq(unsigned int id, unsigned int req_freq);
+extern int exynos_hiu_get_freq(unsigned int id);
+extern int exynos_hiu_get_max_freq(void);
+#else
+static inline int exynos_hiu_set_freq(unsigned int id, unsigned int req_freq) { return 0; }
+static inline int exynos_hiu_get_freq(unsigned int id) { return 0; }
+static inline int exynos_hiu_get_max_freq(void) { return 0; }
+#endif
+
+#endif