Commit | Line | Data |
---|---|---|
3c2a0909 S |
1 | /* |
2 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | |
3 | * http://www.samsung.com | |
4 | * | |
5 | * Jonghwan Choi <jhbird.choi@samsung.com> | |
6 | * | |
7 | * EXYNOS7580 - CPU frequency scaling support | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | |
14 | * kind, whether express or implied; without even the implied warranty | |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | */ | |
18 | ||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
20 | ||
21 | #include <linux/clk.h> | |
22 | #include <linux/cpu.h> | |
23 | #include <linux/cpufreq.h> | |
24 | #include <linux/cpumask.h> | |
25 | #include <linux/export.h> | |
26 | #include <linux/exynos-ss.h> | |
27 | #include <linux/module.h> | |
28 | #include <linux/of_platform.h> | |
29 | #include <linux/opp.h> | |
30 | #include <linux/pm_qos.h> | |
31 | #include <linux/reboot.h> | |
32 | #include <linux/regulator/consumer.h> | |
33 | #include <linux/slab.h> | |
34 | #include <linux/suspend.h> | |
35 | #include <linux/topology.h> | |
36 | #include <linux/types.h> | |
37 | ||
38 | #include <plat/cpu.h> | |
39 | ||
40 | #include <mach/asv-exynos.h> | |
41 | #include <mach/regs-clock-exynos7580.h> | |
42 | #include <mach/tmu.h> | |
43 | ||
/* Currently we support only two clusters */
#ifndef CONFIG_EXYNOS7580_QUAD
#define MAX_CLUSTERS 2
#else
#define MAX_CLUSTERS 1
#endif

/* Mask of divider-busy bits polled by wait_until_divider_stable() */
#define DIV_MASK_ALL 0xffffffff
/* Upper clamp on (volt + cold_offset), in uV — see exynos_regulator_set_voltage() */
#define LIMIT_COLD_VOLTAGE 1350000
/* Floor applied while a cold offset is active, in uV */
#define MIN_COLD_VOLTAGE 950000
/* Extra supply voltage requested while the TMU reports a cold state, in uV */
#define COLD_VOLT_OFFSET 37500
55 | ||
/*
 * Build one apll_freq[] entry.
 *
 * @f:      target frequency in MHz-as-kHz table units (stored as f * 1000)
 * @a0-a6:  DIV_CPU_0 / DIV_APL_0 divider fields, 4 bits each
 * @b0-b1:  DIV_CPU_1 / DIV_APL_1 divider fields
 * @m,p,s:  PLL M/P/S values packed into .mps
 *
 * Fix: b0/b1 were used unparenthesized in the expansion; an expression
 * argument (e.g. "x | y") would have bound against the shift incorrectly.
 * All macro arguments are now parenthesized.
 */
#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, b0, b1, m, p, s)	\
{									\
	.freq = (f) * 1000,						\
	.clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 |	\
			 (a4) << 16 | (a5) << 20 | (a6) << 24),		\
	.clk_div_cpu1 = ((b0) << 0 | (b1) << 4),			\
	.mps = ((m) << 12 | (p) << 4 | (s)),				\
}
64 | ||
65 | /* This enum is also existed in mach/cpufreq.h */ | |
/*
 * Cluster index.  NOTE: a matching definition exists in mach/cpufreq.h —
 * keep the two in sync.
 */
typedef enum {
	CL_ZERO,
#ifndef CONFIG_EXYNOS7580_QUAD
	CL_ONE,
#endif
	CL_END,
} cluster_type;
73 | ||
/* PM QoS handle used to request a MIF (memory bus) floor — see exynos_request_mif() */
static struct pm_qos_request pm_qos_mif;
/* Per-cluster CPU frequency min/max QoS requests */
static struct pm_qos_request cluster_qos_min[CL_END];
static struct pm_qos_request cluster_qos_max[CL_END];
/* Per-cluster boost requests — see exynos_cpufreq_boost_frequency() */
static struct pm_qos_request boost_qos_min[CL_END];
#ifdef CONFIG_SW_SELF_DISCHARGING
static int self_discharging;
#endif

/* Cluster of the boot CPU; assigned in exynos_cpufreq_init() for cpu0 */
cluster_type exynos_boot_cluster;
84 | ||
/* Per-OPP divider and PLL settings, highest frequency first */
static struct {
	unsigned long freq;	/* Hz (table argument is kHz, macro multiplies by 1000) */
	u32 clk_div_cpu0;	/* packed DIV_CPU_0 / DIV_APL_0 register value */
	u32 clk_div_cpu1;	/* packed DIV_CPU_1 / DIV_APL_1 register value */
	u32 mps;		/* packed PLL M, P, S */
} apll_freq[] = {
	/*
	 * values:
	 * freq
	 * clock divider for CPU1, CPU2, ATCLK, PCLK_DBG_CP, ACLK_CPU, PCLK_CPU, SCLK_CNTCLK
	 * clock divider for SCLK_CPU_PLL, SCLK_HPM_CPU
	 * PLL M, P, S
	 */
	APLL_FREQ(1600000, 0, 0, 7, 7, 2, 7, 3, 7, 7, 246, 4, 0),
	APLL_FREQ(1500000, 0, 0, 7, 7, 2, 7, 3, 7, 7, 230, 4, 0),
	APLL_FREQ(1400000, 0, 0, 7, 7, 2, 7, 3, 7, 7, 216, 4, 0),
	APLL_FREQ(1300000, 0, 0, 7, 7, 2, 7, 3, 6, 7, 200, 4, 0),
	APLL_FREQ(1200000, 0, 0, 7, 7, 2, 7, 3, 6, 7, 368, 4, 1),
	APLL_FREQ(1100000, 0, 0, 7, 7, 2, 7, 3, 5, 7, 340, 4, 1),
	APLL_FREQ(1000000, 0, 0, 7, 7, 2, 7, 3, 5, 7, 308, 4, 1),
	APLL_FREQ(900000, 0, 0, 7, 7, 2, 7, 3, 4, 7, 276, 4, 1),
	APLL_FREQ(800000, 0, 0, 7, 7, 2, 7, 3, 4, 7, 248, 4, 1),
	APLL_FREQ(700000, 0, 0, 7, 7, 2, 7, 3, 3, 7, 216, 4, 1),
	APLL_FREQ(600000, 0, 0, 7, 7, 2, 7, 3, 3, 7, 368, 4, 2),
	APLL_FREQ(500000, 0, 0, 7, 7, 2, 7, 3, 2, 7, 312, 4, 2),
	APLL_FREQ(400000, 0, 0, 7, 7, 2, 7, 3, 2, 7, 248, 4, 2),
	APLL_FREQ(300000, 0, 0, 7, 7, 2, 7, 3, 1, 7, 368, 4, 3),
};
113 | ||
/*
 * MIF (memory bus) floor requested for each CPU OPP, indexed the same as
 * apll_freq[] (highest CPU frequency first).  0 means no MIF request.
 */
static unsigned int exynos_bus_table[] = {
	825000,		/* 1.6GHz */
	825000,		/* 1.5GHz */
	825000,		/* 1.4GHz */
	825000,		/* 1.3GHz */
	728000,		/* 1.2GHz */
	728000,		/* 1.1GHz */
	667000,		/* 1.0GHz */
	559000,		/* 900MHz */
	559000,		/* 800MHz */
	416000,		/* 700MHz */
	416000,		/* 600MHz */
	416000,		/* 500MHz */
	0,		/* 400MHz */
	0,		/* 300MHz */
};
130 | ||
static unsigned int voltage_tolerance;	/* in percentage */
/* Serializes scaling, suspend/reboot/TMU state transitions */
static DEFINE_MUTEX(exynos_cpu_lock);
/* While true (suspend/reboot in progress) frequency scaling is skipped */
static bool is_suspended;
/* Cluster0 frequency captured when a non-boot policy initializes; consumed
 * once by exynos_verify_pm_qos_limit() so cluster1 starts no slower */
static unsigned int sync_frequency;
/* Frequency whose voltage is enforced across suspend/reboot */
static unsigned int locking_frequency;
static unsigned int locking_volt;
/* Extra uV applied while cold; set by exynos_cpufreq_tmu_notifier() */
static unsigned int cold_offset;

static struct clk *clk[MAX_CLUSTERS];		/* per-cluster CPU (APLL) clock */
static struct clk *mux[MAX_CLUSTERS];		/* per-cluster CPU clock mux */
static struct clk *alt[MAX_CLUSTERS];		/* alternate (bus PLL) parent */
static struct regulator *reg[MAX_CLUSTERS];	/* per-cluster CPU supply */
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
/* Refcount of policies using each cluster's clocks/tables */
#ifndef CONFIG_EXYNOS7580_QUAD
static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
#else
static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0)};
#endif
static unsigned int alt_freq[MAX_CLUSTERS];	/* alt parent rate, kHz */
static const unsigned int boost_freq = 1300000; /* KHz */
#ifndef CONFIG_EXYNOS7580_QUAD
/* NOTE(review): read by the CPU_ONLINE notifier but never assigned in this
 * chunk — presumably set elsewhere; confirm, otherwise it stays 0 */
static unsigned int maxlock_freq;
#endif

/* Clock names passed to devm_clk_get(), indexed by cluster */
#ifndef CONFIG_EXYNOS7580_QUAD
static const char *cpu_mux[MAX_CLUSTERS] = {"mout_cpu", "mout_apl"};
static const char *alt_pat[MAX_CLUSTERS] = {"mout_bus_pll_cpu_user", "mout_bus_pll_apl_user"};
#else
static const char *cpu_mux[MAX_CLUSTERS] = {"mout_cpu"};
static const char *alt_pat[MAX_CLUSTERS] = {"mout_bus_pll_cpu_user"};
#endif
162 | ||
/* Map a logical CPU number to its cluster index (physical package id) */
static inline int cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}
167 | ||
168 | static bool support_full_frequency(void) | |
169 | { | |
170 | unsigned int data; | |
171 | ||
172 | data = __raw_readl(S5P_VA_CHIPID + 0x14); | |
173 | ||
174 | /* Check the magic number */ | |
175 | if ((data & 0xffffff) == 0x16e493) | |
176 | return false; | |
177 | ||
178 | return true; | |
179 | } | |
180 | ||
181 | static unsigned int exynos_cpufreq_get_cluster(unsigned int cluster) | |
182 | { | |
183 | unsigned int freq = clk_get_rate(clk[cluster]) / 1000; | |
184 | ||
185 | freq += 50000; | |
186 | freq = (freq / 100000) * 100000; | |
187 | ||
188 | return freq; | |
189 | } | |
190 | ||
/* cpufreq .get callback: current frequency (kHz) of @cpu's cluster */
static unsigned int exynos_cpufreq_get(unsigned int cpu)
{
	return exynos_cpufreq_get_cluster(cpu_to_cluster(cpu));
}
197 | ||
198 | /* Validate policy frequency range */ | |
199 | static int exynos_cpufreq_verify_policy(struct cpufreq_policy *policy) | |
200 | { | |
201 | u32 cur_cluster = cpu_to_cluster(policy->cpu); | |
202 | ||
203 | return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]); | |
204 | } | |
205 | ||
206 | static void exynos_cpufreq_boost_frequency(int cpu, unsigned int timeout_ms) | |
207 | { | |
208 | unsigned int booting_freq; | |
209 | cluster_type target_cluster; | |
210 | ||
211 | #ifndef CONFIG_EXYNOS7580_QUAD | |
212 | target_cluster = (cpu < 4) ? (CL_ZERO) : (CL_ONE); | |
213 | #else | |
214 | target_cluster = (CL_ZERO); | |
215 | #endif | |
216 | ||
217 | if (!support_full_frequency()) | |
218 | booting_freq = boost_freq; | |
219 | else | |
220 | booting_freq = boost_freq + 200000; | |
221 | ||
222 | if (timeout_ms) | |
223 | pm_qos_update_request_timeout(&boost_qos_min[target_cluster], booting_freq, timeout_ms * 1000); | |
224 | else | |
225 | pm_qos_update_request(&boost_qos_min[target_cluster], booting_freq); | |
226 | } | |
227 | ||
228 | static unsigned int exynos_get_safe_armvolt(struct cpufreq_policy *policy) | |
229 | { | |
230 | struct device *cpu_dev; | |
231 | struct opp *opp; | |
232 | u32 cluster; | |
233 | ||
234 | cpu_dev = get_cpu_device(policy->cpu); | |
235 | cluster = cpu_to_cluster(policy->cpu); | |
236 | opp = opp_find_freq_exact(cpu_dev, alt_freq[cluster] * 1000, true); | |
237 | ||
238 | return opp_get_voltage(opp); | |
239 | } | |
240 | ||
241 | static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask) | |
242 | { | |
243 | unsigned long timeout = jiffies + msecs_to_jiffies(10); | |
244 | ||
245 | do { | |
246 | if (!(__raw_readl(div_reg) & mask)) | |
247 | return; | |
248 | } while (time_before(jiffies, timeout)); | |
249 | ||
250 | pr_err("%s: timeout in divider stablization\n", __func__); | |
251 | } | |
252 | ||
253 | static int exynos_cpufreq_get_index(int cluster, unsigned long freq) | |
254 | { | |
255 | int index; | |
256 | ||
257 | freq = freq / 1000; | |
258 | ||
259 | for (index = 0; | |
260 | freq_table[cluster][index].frequency != CPUFREQ_TABLE_END; index++) { | |
261 | if (freq_table[cluster][index].frequency == freq) | |
262 | break; | |
263 | } | |
264 | ||
265 | if (freq_table[cluster][index].frequency == CPUFREQ_TABLE_END) | |
266 | return -EINVAL; | |
267 | ||
268 | return index; | |
269 | } | |
270 | ||
/*
 * Program both divider registers of @cluster from apll_freq[div_index],
 * waiting for each divider to settle before touching the next.
 * Cluster 1 uses the APL register block, cluster 0 the CPU block.
 */
static void exynos_apll_set_clkdiv(int cluster, int div_index)
{
	unsigned int div;

	/* Change Divider - CPU0 */
	div = apll_freq[div_index].clk_div_cpu0;

	__raw_writel(div, cluster ? EXYNOS7580_DIV_APL_0 : EXYNOS7580_DIV_CPU_0);

	wait_until_divider_stable(cluster ? EXYNOS7580_DIV_STAT_APL_0 : EXYNOS7580_DIV_STAT_CPU_0, DIV_MASK_ALL);

	/* Change Divider - CPU1 */
	div = apll_freq[div_index].clk_div_cpu1;

	__raw_writel(div, cluster ? EXYNOS7580_DIV_APL_1 : EXYNOS7580_DIV_CPU_1);

	wait_until_divider_stable(cluster ? EXYNOS7580_DIV_STAT_APL_1 : EXYNOS7580_DIV_STAT_CPU_1, DIV_MASK_ALL);
}
289 | ||
/*
 * Re-rate @cluster's APLL to @freq (Hz).  The CPU is parked on the
 * alternate parent while the PLL relocks, then switched back; the order
 * of these three calls must not change.
 */
static int exynos_apll_set_pms(int cluster, unsigned long freq)
{
	int ret;

	/* Run from the alternate parent while the APLL is unstable */
	clk_set_parent(mux[cluster], alt[cluster]);

	ret = clk_set_rate(clk[cluster], freq);
	if (ret)
		pr_err("clk_set_rate failed: %d\n", ret);

	/* Switch back to the (now relocked) APLL */
	clk_set_parent(mux[cluster], clk[cluster]);

	return ret;
}
304 | ||
/*
 * Update the MIF QoS floor from exynos_bus_table[] based on the highest
 * frequency currently requested across clusters.
 */
static void exynos_request_mif(void)
{
	unsigned long freq;
	int index;

	/* Get the max freq from cluter0/1 */
#ifndef CONFIG_EXYNOS7580_QUAD
	freq = max(cpufreq_quick_get(0), cpufreq_quick_get(4));
#else
	freq = cpufreq_quick_get(0);
#endif
	/* Cluster 0/1 have a same freq table */
	index = exynos_cpufreq_get_index(0, freq * 1000);
	if (index >= 0)
		pm_qos_update_request(&pm_qos_mif, exynos_bus_table[index]);
}
321 | ||
322 | static int exynos_set_rate(int cluster, unsigned long freq, bool up) | |
323 | { | |
324 | int ret; | |
325 | int index; | |
326 | ||
327 | index = exynos_cpufreq_get_index(cluster, freq); | |
328 | if (index < 0) | |
329 | return -EINVAL; | |
330 | ||
331 | if (up) { | |
332 | exynos_apll_set_clkdiv(cluster, index); | |
333 | ret = exynos_apll_set_pms(cluster, freq); | |
334 | } else { | |
335 | ret = exynos_apll_set_pms(cluster, freq); | |
336 | exynos_apll_set_clkdiv(cluster, index); | |
337 | } | |
338 | ||
339 | return ret; | |
340 | } | |
341 | ||
/*
 * Program @cluster's supply to @volt (uV) plus any active cold offset,
 * clamped into [MIN_COLD_VOLTAGE, LIMIT_COLD_VOLTAGE] while the offset
 * applies.  Voltages already above the limit are passed through as-is.
 */
static int exynos_regulator_set_voltage(int cluster, unsigned long volt)
{
	unsigned long target;

	if (volt > LIMIT_COLD_VOLTAGE)
		target = volt;
	else if (volt + cold_offset > LIMIT_COLD_VOLTAGE)
		target = LIMIT_COLD_VOLTAGE;
	else if (cold_offset && ((volt + cold_offset) < MIN_COLD_VOLTAGE))
		target = MIN_COLD_VOLTAGE;
	else
		target = volt + cold_offset;

	/* 6250(BUCK STEP value) value depends on pmic */
	return regulator_set_voltage(reg[cluster], target, target + 6250);
}
358 | ||
/* Set clock frequency */
static int exynos_cpufreq_scale(struct cpufreq_policy *policy,
		unsigned int target_freq)
{
	struct cpufreq_freqs freqs;
	struct device *cpu_dev;
	struct opp *opp;
	u32 cur_cluster;
	unsigned long volt, safe_volt = 0;
	int ret = 0;

	freqs.old = exynos_cpufreq_get(policy->cpu);
	freqs.new = target_freq;

	if (freqs.new == freqs.old)
		return 0;

	cur_cluster = cpu_to_cluster(policy->cpu);

	/*
	 * If both old and new frequency are below the alt parent's rate, the
	 * CPU will transiently run faster than either while parked on the alt
	 * parent (see exynos_apll_set_pms()), so a higher "safe" voltage must
	 * be applied for the duration of the switch.
	 */
	if (freqs.old < alt_freq[cur_cluster] && freqs.new < alt_freq[cur_cluster])
		safe_volt = exynos_get_safe_armvolt(policy);

	cpu_dev = get_cpu_device(policy->cpu);
	opp = opp_find_freq_exact(cpu_dev, freqs.new * 1000, true);
	if (IS_ERR(opp)) {
		pr_err("failed to find OPP for %u\n", freqs.new * 1000);
		return PTR_ERR(opp);
	}

	volt = opp_get_voltage(opp);
	/* Voltage (and body bias) go up BEFORE the clock when scaling up */
	if ((freqs.new > freqs.old) && !safe_volt) {
		ret = exynos_regulator_set_voltage(cur_cluster, volt);
		if (ret) {
			pr_err("failed to scale voltage up : %d\n", ret);
			goto out;
		}
		set_match_abb(cur_cluster, get_match_abb(cur_cluster, freqs.new * 1000));
	} else if (safe_volt) {
		/* Pre-apply the safe voltage needed during the alt-parent window */
		ret = exynos_regulator_set_voltage(cur_cluster, safe_volt);
		if (ret) {
			pr_err("failed to scale voltage up : %d\n", ret);
			goto out;
		}
		set_match_abb(cur_cluster, get_match_abb(cur_cluster, freqs.new * 1000));
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	exynos_ss_freq(cur_cluster, freqs.old, ESS_FLAG_IN);

	ret = exynos_set_rate(cur_cluster, freqs.new * 1000, freqs.new > freqs.old);
	if (ret) {
		pr_err("exynos_set_rate failed: %d\n", ret);
		/* Report the unchanged frequency in the POSTCHANGE notify */
		freqs.new = freqs.old;
	}

	exynos_ss_freq(cur_cluster, freqs.new, ESS_FLAG_OUT);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
	/* Voltage drops AFTER the clock when scaling down, or after a
	 * safe-voltage transition settles at the target OPP voltage */
	if ((freqs.new < freqs.old) ||
	    ((freqs.new > freqs.old) && safe_volt)) {
		set_match_abb(cur_cluster, get_match_abb(cur_cluster, freqs.new * 1000));
		ret = exynos_regulator_set_voltage(cur_cluster, volt);
		if (ret)
			pr_err("failed to scale voltage down : %d\n", ret);
	}

	/* Refresh the MIF floor for the new operating point */
	exynos_request_mif();
out:
	return ret;
}
430 | ||
/*
 * Clamp @freq (kHz) into the [min, max] window currently requested via the
 * cluster's PM QoS classes.  The first call for cluster 1 after its policy
 * comes up also enforces the recorded cluster-0 frequency once (consuming
 * sync_frequency).
 */
static unsigned int exynos_verify_pm_qos_limit(int cluster, unsigned int freq)
{
	unsigned int target_freq;
	int pm_qos_class_min = PM_QOS_CLUSTER0_FREQ_MIN;
	int pm_qos_class_max = PM_QOS_CLUSTER0_FREQ_MAX;

#ifndef CONFIG_EXYNOS7580_QUAD
	if (cluster == CL_ONE) {
		pm_qos_class_min = PM_QOS_CLUSTER1_FREQ_MIN;
		pm_qos_class_max = PM_QOS_CLUSTER1_FREQ_MAX;
	}
#endif

	target_freq = max((unsigned int)pm_qos_request(pm_qos_class_min), freq);
	target_freq = min((unsigned int)pm_qos_request(pm_qos_class_max), target_freq);

#ifndef CONFIG_EXYNOS7580_QUAD
	/* If cluster1 is turned on, first freq should be higher than cluster 0 */
	if (sync_frequency && (cluster == CL_ONE)) {
		target_freq = max(target_freq, sync_frequency);
		sync_frequency = 0;	/* one-shot */
	}
#endif

	return target_freq;
}
457 | ||
458 | /* Set clock frequency */ | |
459 | static int exynos_cpufreq_set_target(struct cpufreq_policy *policy, | |
460 | unsigned int target_freq, unsigned int relation) | |
461 | { | |
462 | u32 cpu = policy->cpu, freq_tab_idx, cur_cluster; | |
463 | unsigned int freq; | |
464 | int ret = 0; | |
465 | ||
466 | mutex_lock(&exynos_cpu_lock); | |
467 | ||
468 | if (is_suspended) | |
469 | goto out; | |
470 | ||
471 | if (target_freq == 0) | |
472 | target_freq = policy->min; | |
473 | ||
474 | /* if PLL bypass, frequency scale is skip */ | |
475 | if (exynos_cpufreq_get(cpu) <= 26000) | |
476 | goto out; | |
477 | ||
478 | cur_cluster = cpu_to_cluster(policy->cpu); | |
479 | ||
480 | target_freq = exynos_verify_pm_qos_limit(cur_cluster, target_freq); | |
481 | ||
482 | /* Determine valid target frequency using freq_table */ | |
483 | ret = cpufreq_frequency_table_target(policy, freq_table[cur_cluster], | |
484 | target_freq, relation, &freq_tab_idx); | |
485 | if (ret) { | |
486 | pr_err("failed to match target freqency %d: %d\n", | |
487 | target_freq, ret); | |
488 | goto out; | |
489 | } | |
490 | ||
491 | freq = freq_table[cur_cluster][freq_tab_idx].frequency; | |
492 | ||
493 | pr_debug("%s: cpu: %d, cluster: %d, target freq: %d, new freq: %d\n", | |
494 | __func__, cpu, cur_cluster, target_freq, freq); | |
495 | ||
496 | ret = exynos_cpufreq_scale(policy, freq); | |
497 | ||
498 | out: | |
499 | mutex_unlock(&exynos_cpu_lock); | |
500 | return ret; | |
501 | } | |
502 | ||
503 | static void put_cluster_clk_and_freq_table(struct device *cpu_dev) | |
504 | { | |
505 | u32 cluster = cpu_to_cluster(cpu_dev->id); | |
506 | dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); | |
507 | } | |
508 | ||
509 | /* get cpu node with valid operating-points */ | |
510 | static struct device_node *get_cpu_node_with_valid_op(int cpu) | |
511 | { | |
512 | struct device_node *np = NULL, *parent; | |
513 | int count = 0; | |
514 | ||
515 | parent = of_find_node_by_path("/cpus"); | |
516 | if (!parent) { | |
517 | pr_err("failed to find OF /cpus\n"); | |
518 | return NULL; | |
519 | } | |
520 | ||
521 | for_each_child_of_node(parent, np) { | |
522 | if (count++ != cpu) | |
523 | continue; | |
524 | if (!of_get_property(np, "operating-points", NULL)) { | |
525 | of_node_put(np); | |
526 | np = NULL; | |
527 | } | |
528 | ||
529 | break; | |
530 | } | |
531 | ||
532 | of_node_put(parent); | |
533 | return np; | |
534 | } | |
535 | ||
/**
 * exynos_of_init_opp_table() - Initialize opp table from device tree
 * @dev: device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 * DT voltages are overridden by the ASV (adaptive supply voltage) table
 * for this chip when get_match_volt() returns a non-zero value.
 *
 * Return: 0 on success, -ENODEV/-ENODATA/-EINVAL on a missing or
 * malformed "operating-points" property.
 */
static int exynos_of_init_opp_table(struct device *dev)
{
	u32 cluster = cpu_to_cluster(dev->id);
	const struct property *prop;
	const __be32 *val;
	enum asv_type_id asv_id;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;	/* kHz -> Hz */
		unsigned long volt = be32_to_cpup(val++);
		unsigned long temp;

		/* Prefer the per-chip ASV voltage over the DT value */
		asv_id = cluster;
		temp = get_match_volt(asv_id, freq);
		if (temp)
			volt = temp;

		if (opp_add_dec(dev, freq, volt))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}
585 | ||
586 | static int exynos_init_opp_table(struct device *cpu_dev) | |
587 | { | |
588 | struct device_node *np; | |
589 | int ret; | |
590 | ||
591 | np = get_cpu_node_with_valid_op(cpu_dev->id); | |
592 | if (!np) | |
593 | return -ENODATA; | |
594 | ||
595 | cpu_dev->of_node = np; | |
596 | ret = exynos_of_init_opp_table(cpu_dev); | |
597 | of_node_put(np); | |
598 | ||
599 | return ret; | |
600 | } | |
601 | ||
602 | static int get_cluster_clk_and_freq_table(struct device *cpu_dev) | |
603 | { | |
604 | u32 cluster = cpu_to_cluster(cpu_dev->id); | |
605 | char name[14] = "cpu-cluster."; | |
606 | int ret; | |
607 | ||
608 | if (atomic_inc_return(&cluster_usage[cluster]) != 1) | |
609 | return 0; | |
610 | ||
611 | ret = exynos_init_opp_table(cpu_dev); | |
612 | if (ret) { | |
613 | dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n", | |
614 | __func__, cpu_dev->id, ret); | |
615 | goto atomic_dec; | |
616 | } | |
617 | ||
618 | ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); | |
619 | if (ret) { | |
620 | dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", | |
621 | __func__, cpu_dev->id, ret); | |
622 | goto atomic_dec; | |
623 | } | |
624 | name[12] = cluster + '0'; | |
625 | reg[cluster] = devm_regulator_get(cpu_dev, name); | |
626 | if (IS_ERR(reg[cluster])) { | |
627 | dev_err(cpu_dev, "%s: failed to get regulator, cluster: %d\n", | |
628 | __func__, cluster); | |
629 | goto opp_free; | |
630 | } | |
631 | ||
632 | mux[cluster] = devm_clk_get(cpu_dev, cpu_mux[cluster]); | |
633 | if (IS_ERR(mux[cluster])) { | |
634 | dev_err(cpu_dev, "%s: failed to get clk for cpu mux, cluster: %d\n", | |
635 | __func__, cluster); | |
636 | goto opp_free; | |
637 | } | |
638 | ||
639 | alt[cluster] = devm_clk_get(cpu_dev, alt_pat[cluster]); | |
640 | if (IS_ERR(alt[cluster])) { | |
641 | dev_err(cpu_dev, "%s: failed to get clk for alt parent, cluster: %d\n", | |
642 | __func__, cluster); | |
643 | goto opp_free; | |
644 | } | |
645 | ||
646 | alt_freq[cluster] = clk_get_rate(alt[cluster]) / 1000; | |
647 | ||
648 | clk[cluster] = devm_clk_get(cpu_dev, name); | |
649 | if (!IS_ERR(clk[cluster])) { | |
650 | dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", | |
651 | __func__, clk[cluster], freq_table[cluster], | |
652 | cluster); | |
653 | return 0; | |
654 | } | |
655 | ||
656 | dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", | |
657 | __func__, cpu_dev->id, cluster); | |
658 | ret = PTR_ERR(clk[cluster]); | |
659 | ||
660 | opp_free: | |
661 | opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); | |
662 | ||
663 | atomic_dec: | |
664 | atomic_dec(&cluster_usage[cluster]); | |
665 | dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, | |
666 | cluster); | |
667 | return ret; | |
668 | } | |
669 | ||
670 | static int exynos_get_transition_latency(struct device *cpu_dev) | |
671 | { | |
672 | struct device_node *np; | |
673 | u32 transition_latency = CPUFREQ_ETERNAL; | |
674 | ||
675 | np = get_cpu_node_with_valid_op(cpu_dev->id); | |
676 | if (!np) | |
677 | return CPUFREQ_ETERNAL; | |
678 | ||
679 | of_property_read_u32(np, "clock-latency", &transition_latency); | |
680 | of_node_put(np); | |
681 | ||
682 | pr_debug("%s: clock-latency: %d\n", __func__, transition_latency); | |
683 | return transition_latency; | |
684 | } | |
685 | ||
/*
 * Read the voltage tolerance (percent) from the CPU's DT node; 0 if absent.
 * NOTE: the DT property name is literally "voltage-torelance" (sic) — the
 * misspelling is part of the binding shipped in device trees and must not
 * be "corrected" here without also changing the DTs.
 */
static int exynos_get_voltage_tolerance(struct device *cpu_dev)
{
	struct device_node *np;
	u32 voltage_tolerance = 0;

	np = get_cpu_node_with_valid_op(cpu_dev->id);
	if (!np)
		return voltage_tolerance;

	of_property_read_u32(np, "voltage-torelance", &voltage_tolerance);
	of_node_put(np);

	pr_debug("%s: voltage-torelance: %d\n", __func__, voltage_tolerance);
	return voltage_tolerance;
}
701 | ||
/*
 * PM notifier: on suspend entry, freeze frequency scaling and make sure
 * every cluster running below locking_frequency is raised to locking_volt
 * so the frequency held across suspend remains safe.  Scaling resumes on
 * PM_POST_SUSPEND.  Returns NOTIFY_BAD (and unfreezes) if a voltage
 * request fails.
 */
static int exynos_pm_notify(struct notifier_block *nb, unsigned long event,
			    void *dummy)
{
	int i, ret;

	if (event == PM_SUSPEND_PREPARE) {
		mutex_lock(&exynos_cpu_lock);
		is_suspended = true;
		mutex_unlock(&exynos_cpu_lock);

		for (i = 0; i < MAX_CLUSTERS; i++) {
			if (locking_frequency > exynos_cpufreq_get_cluster(i)) {
				ret = exynos_regulator_set_voltage(i, locking_volt);
				if (ret < 0) {
					pr_err("%s: Exynos cpufreq suspend: setting voltage to %d\n",
					       __func__, locking_volt);
					mutex_lock(&exynos_cpu_lock);
					is_suspended = false;
					mutex_unlock(&exynos_cpu_lock);

					return NOTIFY_BAD;
				}
			}

		}

	} else if (event == PM_POST_SUSPEND) {
		mutex_lock(&exynos_cpu_lock);
		is_suspended = false;
		mutex_unlock(&exynos_cpu_lock);
	}

	return NOTIFY_OK;
}
736 | ||
737 | ||
#ifndef CONFIG_EXYNOS7580_QUAD
/*
 * Hotplug notifier: when the FIRST CPU of cluster 1 comes online, restore
 * the cluster's max-frequency QoS to maxlock_freq.
 * NOTE(review): maxlock_freq is never assigned in this file chunk —
 * presumably set elsewhere; if not, this restores a 0 cap.  Confirm.
 */
static int __cpuinit exynos_cpufreq_cpu_up_notifier(struct notifier_block *notifier,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	struct cpumask mask;
	int cluster;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
			cluster = cpu_to_cluster(cpu);
			if (cluster == CL_ONE) {
				/* weight == 1 means this CPU is the only one online in its group */
				cpumask_and(&mask, cpu_coregroup_mask(cpu), cpu_online_mask);
				if (cpumask_weight(&mask) == 1)
					pm_qos_update_request(&cluster_qos_max[CL_ONE], maxlock_freq);
			}
			break;
		}
	}

	return NOTIFY_OK;
}

/*
 * Hotplug notifier: before the LAST CPU of cluster 1 goes down, cap the
 * cluster at the second-lowest OPP (apll_freq[12] == 400 MHz; .freq is in
 * Hz, hence / 1000 to kHz).  Skipped entirely during suspend.
 */
static int __cpuinit exynos_cpufreq_cpu_down_notifier(struct notifier_block *notifier,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	struct cpumask mask;
	int cluster;

	if (is_suspended)
		return NOTIFY_OK;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_DOWN_PREPARE:
			cluster = cpu_to_cluster(cpu);
			if (cluster == CL_ONE) {
				cpumask_and(&mask, cpu_coregroup_mask(cpu), cpu_online_mask);
				if (cpumask_weight(&mask) == 1)
					pm_qos_update_request(&cluster_qos_max[CL_ONE], apll_freq[ARRAY_SIZE(apll_freq) - 2].freq / 1000);
			}
			break;
		}
	}

	return NOTIFY_OK;
}

/* Run last on CPU-up so other subsystems settle first */
static struct notifier_block __refdata exynos_cpufreq_cpu_up_nb = {
	.notifier_call = exynos_cpufreq_cpu_up_notifier,
	.priority = INT_MIN,
};

/* Run first on CPU-down so the cap is in place before teardown */
static struct notifier_block __refdata exynos_cpufreq_cpu_down_nb = {
	.notifier_call = exynos_cpufreq_cpu_down_notifier,
	.priority = INT_MAX,
};
#endif
802 | ||
/* Suspend/resume hook (exynos_pm_notify), registered for cpu0's policy */
static struct notifier_block exynos_cpu_pm_notifier = {
	.notifier_call = exynos_pm_notify,
	.priority = -1,
};
807 | ||
/* reboot notifier */
/*
 * Freeze frequency scaling for the remainder of the reboot and raise any
 * cluster running below locking_frequency to locking_volt (same policy as
 * suspend, see exynos_pm_notify()).
 */
static int exynos_reboot_notify(struct notifier_block *nb, unsigned long event,
				void *dummy)
{
	int i, ret;

	mutex_lock(&exynos_cpu_lock);
	is_suspended = true;
	mutex_unlock(&exynos_cpu_lock);

	for (i = 0; i < MAX_CLUSTERS; i++) {
		if (locking_frequency > exynos_cpufreq_get_cluster(i)) {
			ret = exynos_regulator_set_voltage(i, locking_volt);
			if (ret < 0) {
				pr_err("%s: Exynos cpufreq reboot: setting voltage to %d\n",
				       __func__, locking_volt);
				mutex_lock(&exynos_cpu_lock);
				is_suspended = false;
				mutex_unlock(&exynos_cpu_lock);

				return NOTIFY_BAD;
			}
		}

	}

	return NOTIFY_OK;
}

static struct notifier_block exynos_cpu_reboot_notifier = {
	.notifier_call = exynos_reboot_notify,
};
840 | ||
/*
 * TMU notifier: on a TMU_COLD event, arm (or disarm, *on == 0) the cold
 * voltage offset and immediately re-apply each cluster's OPP voltage at
 * its current frequency so the offset takes effect.  Skipped while
 * suspended.
 */
static int exynos_cpufreq_tmu_notifier(struct notifier_block *notifier,
				       unsigned long event, void *v)
{
	struct device *cpu_dev;
	struct opp *opp;
	unsigned long freq, volt;
	int i, ret = NOTIFY_OK;
	unsigned int *on = v;

	if (event != TMU_COLD)
		return NOTIFY_OK;

	mutex_lock(&exynos_cpu_lock);

	if (is_suspended)
		goto out;

	if (*on)
		cold_offset = COLD_VOLT_OFFSET;
	else
		cold_offset = 0;

	for (i = 0; i < MAX_CLUSTERS; i++) {
		freq = exynos_cpufreq_get_cluster(i);
		/*
		 * cpu0's device is used for the OPP lookup of every cluster;
		 * valid because both clusters share one frequency table (see
		 * exynos_request_mif()).
		 */
		cpu_dev = get_cpu_device(0);
		opp = opp_find_freq_exact(cpu_dev, freq * 1000, true);
		volt = opp_get_voltage(opp);
		ret = exynos_regulator_set_voltage(i, volt);
		if (ret) {
			mutex_unlock(&exynos_cpu_lock);
			return NOTIFY_BAD;
		}
	}

out:
	mutex_unlock(&exynos_cpu_lock);

	return NOTIFY_OK;
}

static struct notifier_block exynos_tmu_nb = {
	.notifier_call = exynos_cpufreq_tmu_notifier,
};
884 | ||
/* Per-CPU initialization */
static int exynos_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev);
	if (ret)
		return ret;

	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
			policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev);
		return ret;
	}

	cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);

	policy->cpuinfo.transition_latency = exynos_get_transition_latency(cpu_dev);
	voltage_tolerance = exynos_get_voltage_tolerance(cpu_dev);
	policy->cur = exynos_cpufreq_get(policy->cpu);
	/* Later this code will be removed. This is for first lot */
	policy->cpuinfo.min_freq = 400000;
	/* index 13 is the last apll_freq[] entry (300 MHz) — disabled */
	freq_table[cur_cluster][13].frequency = CPUFREQ_ENTRY_INVALID;

	/* Cap the max frequency per silicon revision / fused limit */
	if (samsung_rev() == EXYNOS7580_REV_0) {
		if (!support_full_frequency())
			policy->cpuinfo.max_freq = 800000;
		else
			policy->cpuinfo.max_freq = 1400000;
	} else if (soc_is_exynos7580_v1()) {
		policy->cpuinfo.max_freq = 1500000;
		/* drop the 1.6 GHz entry on v1 silicon */
		freq_table[cur_cluster][0].frequency = CPUFREQ_ENTRY_INVALID;
	}

	if (soc_is_exynos7580_v1())
		policy->cpuinfo.max_freq = 1500000;

	cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

	if (policy->cpu == 0) {
		/* Boot-CPU policy: record boot state and register notifiers once */
		exynos_boot_cluster = cpu_to_cluster(0);
		locking_frequency = exynos_cpufreq_get(0);
		register_pm_notifier(&exynos_cpu_pm_notifier);
		register_reboot_notifier(&exynos_cpu_reboot_notifier);
	} else {
		/* Remember cluster0's frequency so cluster1 starts no slower
		 * (consumed by exynos_verify_pm_qos_limit()) */
		sync_frequency = exynos_cpufreq_get(0);
	}

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}
947 | ||
948 | static int exynos_cpufreq_exit(struct cpufreq_policy *policy) | |
949 | { | |
950 | struct device *cpu_dev; | |
951 | ||
952 | cpu_dev = get_cpu_device(policy->cpu); | |
953 | if (!cpu_dev) { | |
954 | pr_err("%s: failed to get cpu%d device\n", __func__, | |
955 | policy->cpu); | |
956 | return -ENODEV; | |
957 | } | |
958 | ||
959 | put_cluster_clk_and_freq_table(cpu_dev); | |
960 | dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu); | |
961 | ||
962 | return 0; | |
963 | } | |
964 | ||
/* Export freq_table to sysfs */
/* NULL-terminated attribute list plugged into exynos_cpufreq_driver.attr */
static struct freq_attr *exynos_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
970 | ||
/*
 * Intentionally empty IPI callback: smp_call_function_single() with this
 * function is used by the QoS notifiers purely to kick the target CPU out
 * of idle before a frequency transition.
 */
static void exynos_qos_nop(void *info)
{
}
974 | ||
975 | static ssize_t show_cpufreq_table(struct kobject *kobj, struct attribute *attr, | |
976 | char *buf) | |
977 | { | |
978 | unsigned int i; | |
979 | ssize_t count = 0; | |
980 | struct cpufreq_frequency_table *table = freq_table[0]; | |
981 | ||
982 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | |
983 | if (table[i].frequency == CPUFREQ_ENTRY_INVALID) | |
984 | continue; | |
985 | count += sprintf(&buf[count], "%d ", table[i].frequency); | |
986 | } | |
987 | count += sprintf(&buf[count], "\n"); | |
988 | ||
989 | return count; | |
990 | } | |
991 | ||
992 | static ssize_t show_cpufreq_min_limit(struct kobject *kobj, struct attribute *attr, | |
993 | char *buf) | |
994 | { | |
995 | int len; | |
996 | ||
997 | len = sprintf(buf, "%u\n", pm_qos_request(PM_QOS_CLUSTER0_FREQ_MIN)); | |
998 | #ifndef CONFIG_EXYNOS7580_QUAD | |
999 | len += sprintf(buf + len, "%u\n", pm_qos_request(PM_QOS_CLUSTER1_FREQ_MIN)); | |
1000 | #endif | |
1001 | ||
1002 | return len; | |
1003 | } | |
1004 | ||
1005 | static ssize_t store_cpufreq_min_limit(struct kobject *kobj, struct attribute *attr, | |
1006 | const char *buf, size_t n) | |
1007 | { | |
1008 | int i; | |
1009 | int ret, freq; | |
1010 | ||
1011 | ret = sscanf(buf, "%d", &freq); | |
1012 | if (ret != 1) | |
1013 | return -EINVAL; | |
1014 | ||
1015 | if (freq < 0) | |
1016 | freq = 0; | |
1017 | ||
1018 | #ifdef CONFIG_SW_SELF_DISCHARGING | |
1019 | if (freq < self_discharging) { | |
1020 | freq = self_discharging; | |
1021 | } | |
1022 | #endif | |
1023 | ||
1024 | for (i = 0; i < CL_END; i++) | |
1025 | pm_qos_update_request(&cluster_qos_min[i], freq); | |
1026 | ||
1027 | return n; | |
1028 | } | |
1029 | ||
1030 | static ssize_t show_cpufreq_max_limit(struct kobject *kobj, struct attribute *attr, | |
1031 | char *buf) | |
1032 | { | |
1033 | int len; | |
1034 | ||
1035 | len = sprintf(buf, "%u\n", pm_qos_request(PM_QOS_CLUSTER0_FREQ_MAX)); | |
1036 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1037 | len += sprintf(buf + len, "%u\n", pm_qos_request(PM_QOS_CLUSTER1_FREQ_MAX)); | |
1038 | #endif | |
1039 | ||
1040 | return len; | |
1041 | } | |
1042 | ||
1043 | static ssize_t store_cpufreq_max_limit(struct kobject *kobj, struct attribute *attr, | |
1044 | const char *buf, size_t n) | |
1045 | { | |
1046 | int i; | |
1047 | int ret, freq; | |
1048 | int index = 0; | |
1049 | ||
1050 | ret = sscanf(buf, "%d", &freq); | |
1051 | if (ret != 1) | |
1052 | return -EINVAL; | |
1053 | ||
1054 | if (soc_is_exynos7580_v1()) | |
1055 | index = 1; | |
1056 | ||
1057 | if (freq < 0) | |
1058 | freq = apll_freq[index].freq / 1000; | |
1059 | ||
1060 | for (i = 0; i < CL_END; i++) | |
1061 | pm_qos_update_request(&cluster_qos_max[i], freq); | |
1062 | ||
1063 | ||
1064 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1065 | maxlock_freq = freq; | |
1066 | #endif | |
1067 | return n; | |
1068 | } | |
1069 | ||
#ifdef CONFIG_SW_SELF_DISCHARGING
/* sysfs show: current self-discharging cpufreq floor in kHz (0 = disabled). */
static ssize_t show_cpufreq_self_discharging(struct kobject *kobj,
			struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", self_discharging);
}
1076 | ||
1077 | static ssize_t store_cpufreq_self_discharging(struct kobject *kobj, struct attribute *attr, | |
1078 | const char *buf, size_t count) | |
1079 | { | |
1080 | int input; | |
1081 | int i; | |
1082 | ||
1083 | if (!sscanf(buf, "%d", &input)) | |
1084 | return -EINVAL; | |
1085 | ||
1086 | if (input > 0) { | |
1087 | self_discharging = input; | |
1088 | cpu_idle_poll_ctrl(true); | |
1089 | } | |
1090 | else { | |
1091 | self_discharging = 0; | |
1092 | cpu_idle_poll_ctrl(false); | |
1093 | } | |
1094 | ||
1095 | /* Isla Quad(A53 quad) need cpufreq min limit */ | |
1096 | for (i = 0; i < CL_END; i++) { | |
1097 | pm_qos_update_request(&cluster_qos_min[i], self_discharging); | |
1098 | } | |
1099 | ||
1100 | return count; | |
1101 | } | |
1102 | #endif | |
1103 | ||
/* Instantiate the sysfs attribute objects for the show_/store_ pairs above */
define_one_global_ro(cpufreq_table);
define_one_global_rw(cpufreq_min_limit);
define_one_global_rw(cpufreq_max_limit);
#ifdef CONFIG_SW_SELF_DISCHARGING
define_one_global_rw(cpufreq_self_discharging);
#endif
1110 | ||
/* Attributes registered under /sys/power via sysfs_create_group() in probe */
static struct attribute * g[] = {
	&cpufreq_table.attr,
	&cpufreq_min_limit.attr,
	&cpufreq_max_limit.attr,
#ifdef CONFIG_SW_SELF_DISCHARGING
	&cpufreq_self_discharging.attr,
#endif
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = g,
};
1124 | ||
/* Provided by cpuidle: hook to temporarily keep CPUs out of the C3 state */
extern void (*disable_c3_idle)(bool disable);
/*
 * PM QoS notifier for PM_QOS_CLUSTER0_FREQ_MIN: when the aggregated QoS
 * minimum ('val', kHz) rises above the current cluster-0 frequency,
 * immediately ramp the cluster up to that floor.
 */
static int exynos_min_cluster0_notifier(struct notifier_block *notifier,
					unsigned long val, void *v)
{
	struct cpufreq_policy *policy;
	int ret;
	unsigned int freq;

	policy = cpufreq_cpu_get(0);
	if (!policy)
		return NOTIFY_BAD;

	/* Policy not fully initialized yet (no governor) - bail out */
	if (!policy->user_policy.governor) {
		cpufreq_cpu_put(policy);
		return NOTIFY_BAD;
	}

	freq = exynos_cpufreq_get(0);

	/* Already at or above the requested floor - nothing to do */
	if (freq >= val)
		goto out;

	freq = val;

	/* Hold CPU0 out of deep idle across the transition */
	if (disable_c3_idle)
		disable_c3_idle(true);

	/* Nop IPI wakes CPU0 so the transition is not delayed by idle */
	smp_call_function_single(0, exynos_qos_nop, NULL, 0);

	ret = __cpufreq_driver_target(policy, freq, 0);

	if (disable_c3_idle)
		disable_c3_idle(false);

	if (ret < 0) {
		cpufreq_cpu_put(policy);
		return NOTIFY_BAD;
	}

out:
	cpufreq_cpu_put(policy);

	return NOTIFY_OK;
}
1169 | ||
1170 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1171 | static int exynos_min_cluster1_notifier(struct notifier_block *notifier, | |
1172 | unsigned long val, void *v) | |
1173 | { | |
1174 | struct cpufreq_policy *policy; | |
1175 | int ret; | |
1176 | unsigned int freq; | |
1177 | ||
1178 | policy = cpufreq_cpu_get(4); | |
1179 | if (!policy) | |
1180 | return NOTIFY_BAD; | |
1181 | ||
1182 | if (!policy->user_policy.governor) { | |
1183 | cpufreq_cpu_put(policy); | |
1184 | return NOTIFY_BAD; | |
1185 | } | |
1186 | ||
1187 | freq = exynos_cpufreq_get(4); | |
1188 | ||
1189 | if (freq >= val) | |
1190 | goto out; | |
1191 | ||
1192 | freq = val; | |
1193 | ||
1194 | if (disable_c3_idle) | |
1195 | disable_c3_idle(true); | |
1196 | ||
1197 | smp_call_function_single(4, exynos_qos_nop, NULL, 0); | |
1198 | ||
1199 | if (disable_c3_idle) | |
1200 | disable_c3_idle(false); | |
1201 | ||
1202 | ret = __cpufreq_driver_target(policy, freq, 0); | |
1203 | if (ret < 0) { | |
1204 | cpufreq_cpu_put(policy); | |
1205 | return NOTIFY_BAD; | |
1206 | } | |
1207 | ||
1208 | out: | |
1209 | cpufreq_cpu_put(policy); | |
1210 | ||
1211 | return NOTIFY_OK; | |
1212 | } | |
1213 | #endif | |
1214 | ||
/* Notifier blocks registered on the per-cluster FREQ_MIN QoS classes */
static struct notifier_block exynos_min_cluster0_nb = {
	.notifier_call = exynos_min_cluster0_notifier,
};

#ifndef CONFIG_EXYNOS7580_QUAD
static struct notifier_block exynos_min_cluster1_nb = {
	.notifier_call = exynos_min_cluster1_notifier,
};
#endif
1224 | ||
1225 | static int exynos_max_cluster0_notifier(struct notifier_block *notifier, | |
1226 | unsigned long val, void *v) | |
1227 | { | |
1228 | struct cpufreq_policy *policy; | |
1229 | int ret; | |
1230 | unsigned int freq; | |
1231 | ||
1232 | policy = cpufreq_cpu_get(0); | |
1233 | if (!policy) | |
1234 | return NOTIFY_BAD; | |
1235 | ||
1236 | if (!policy->user_policy.governor) { | |
1237 | cpufreq_cpu_put(policy); | |
1238 | return NOTIFY_BAD; | |
1239 | } | |
1240 | ||
1241 | freq = exynos_cpufreq_get(0); | |
1242 | ||
1243 | if (freq <= val) | |
1244 | goto out; | |
1245 | ||
1246 | freq = val; | |
1247 | ||
1248 | if (disable_c3_idle) | |
1249 | disable_c3_idle(true); | |
1250 | ||
1251 | smp_call_function_single(0, exynos_qos_nop, NULL, 0); | |
1252 | ||
1253 | ret = __cpufreq_driver_target(policy, freq, 0); | |
1254 | ||
1255 | if (disable_c3_idle) | |
1256 | disable_c3_idle(false); | |
1257 | ||
1258 | if (ret < 0) { | |
1259 | cpufreq_cpu_put(policy); | |
1260 | return NOTIFY_BAD; | |
1261 | } | |
1262 | ||
1263 | out: | |
1264 | cpufreq_cpu_put(policy); | |
1265 | ||
1266 | return NOTIFY_OK; | |
1267 | } | |
1268 | ||
#ifndef CONFIG_EXYNOS7580_QUAD
/*
 * PM QoS notifier for PM_QOS_CLUSTER1_FREQ_MAX (first cluster-1 CPU is 4):
 * when the aggregated QoS maximum ('val', kHz) drops below the current
 * cluster-1 frequency, immediately ramp the cluster down to that ceiling.
 */
static int exynos_max_cluster1_notifier(struct notifier_block *notifier,
					unsigned long val, void *v)
{
	struct cpufreq_policy *policy;
	int ret;
	unsigned int freq;

	policy = cpufreq_cpu_get(4);
	if (!policy)
		return NOTIFY_BAD;

	/* Policy not fully initialized yet (no governor) - bail out */
	if (!policy->user_policy.governor) {
		cpufreq_cpu_put(policy);
		return NOTIFY_BAD;
	}

	freq = exynos_cpufreq_get(4);

	/* Already at or below the requested ceiling - nothing to do */
	if (freq <= val)
		goto out;

	freq = val;

	/* Hold CPU4 out of deep idle across the transition */
	if (disable_c3_idle)
		disable_c3_idle(true);

	/* Nop IPI wakes CPU4 so the transition happens promptly */
	smp_call_function_single(4, exynos_qos_nop, NULL, 0);

	ret = __cpufreq_driver_target(policy, freq, 0);

	if (disable_c3_idle)
		disable_c3_idle(false);

	if (ret < 0) {
		cpufreq_cpu_put(policy);
		return NOTIFY_BAD;
	}

out:
	cpufreq_cpu_put(policy);

	return NOTIFY_OK;
}
#endif
1314 | ||
/* Notifier blocks registered on the per-cluster FREQ_MAX QoS classes */
static struct notifier_block exynos_max_cluster0_nb = {
	.notifier_call = exynos_max_cluster0_notifier,
};

#ifndef CONFIG_EXYNOS7580_QUAD
static struct notifier_block exynos_max_cluster1_nb = {
	.notifier_call = exynos_max_cluster1_notifier,
};
#endif
1324 | ||
/* cpufreq driver ops registered with the core in exynos_smp_probe() */
static struct cpufreq_driver exynos_cpufreq_driver = {
	.name			= "smp-cpufreq",
	.flags			= CPUFREQ_STICKY,
	.verify			= exynos_cpufreq_verify_policy,
	.target			= exynos_cpufreq_set_target,
	.get			= exynos_cpufreq_get,
	.init			= exynos_cpufreq_init,
	.exit			= exynos_cpufreq_exit,
	.have_governor_per_policy = true,
	.attr			= exynos_cpufreq_attr,
};
1336 | ||
1337 | static struct platform_device_info devinfo = { .name = "exynos-smp-cpufreq", }; | |
1338 | ||
1339 | static int exynos_cpufreq_device_init(void) | |
1340 | { | |
1341 | return IS_ERR(platform_device_register_full(&devinfo)); | |
1342 | } | |
1343 | device_initcall(exynos_cpufreq_device_init); | |
1344 | ||
1345 | static int exynos_smp_probe(struct platform_device *pdev) | |
1346 | { | |
1347 | struct device_node *np; | |
1348 | struct device *cpu_dev; | |
1349 | struct opp *opp; | |
1350 | int ret; | |
1351 | ||
1352 | np = get_cpu_node_with_valid_op(pdev->dev.id); | |
1353 | if (!np) | |
1354 | return -ENODEV; | |
1355 | ||
1356 | of_node_put(np); | |
1357 | ||
1358 | if (soc_is_exynos7580_v1()) { | |
1359 | pm_qos_add_request(&pm_qos_mif, PM_QOS_BUS_THROUGHPUT, exynos_bus_table[ARRAY_SIZE(apll_freq) - 2]); | |
1360 | pm_qos_add_request(&cluster_qos_max[CL_ZERO], PM_QOS_CLUSTER0_FREQ_MAX, apll_freq[1].freq / 1000); | |
1361 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1362 | pm_qos_add_request(&cluster_qos_max[CL_ONE], PM_QOS_CLUSTER1_FREQ_MAX, apll_freq[1].freq / 1000); | |
1363 | maxlock_freq = apll_freq[1].freq / 1000; | |
1364 | #endif | |
1365 | } else { | |
1366 | pm_qos_add_request(&pm_qos_mif, PM_QOS_BUS_THROUGHPUT, exynos_bus_table[ARRAY_SIZE(apll_freq) - 1]); | |
1367 | pm_qos_add_request(&cluster_qos_max[CL_ZERO], PM_QOS_CLUSTER0_FREQ_MAX, apll_freq[0].freq / 1000); | |
1368 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1369 | pm_qos_add_request(&cluster_qos_max[CL_ONE], PM_QOS_CLUSTER1_FREQ_MAX, apll_freq[0].freq / 1000); | |
1370 | maxlock_freq = apll_freq[0].freq / 1000; | |
1371 | #endif | |
1372 | } | |
1373 | ||
1374 | pm_qos_add_request(&cluster_qos_min[CL_ZERO], PM_QOS_CLUSTER0_FREQ_MIN, 0); | |
1375 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1376 | pm_qos_add_request(&cluster_qos_min[CL_ONE], PM_QOS_CLUSTER1_FREQ_MIN, 0); | |
1377 | #endif | |
1378 | ||
1379 | ret = sysfs_create_group(power_kobj, &attr_group); | |
1380 | if (ret) | |
1381 | pr_err("%s: Failed creating sysfs group, err: %d\n", | |
1382 | __func__, ret); | |
1383 | ||
1384 | exynos_tmu_add_notifier(&exynos_tmu_nb); | |
1385 | ||
1386 | pm_qos_add_notifier(PM_QOS_CLUSTER0_FREQ_MIN, &exynos_min_cluster0_nb); | |
1387 | pm_qos_add_notifier(PM_QOS_CLUSTER0_FREQ_MAX, &exynos_max_cluster0_nb); | |
1388 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1389 | pm_qos_add_notifier(PM_QOS_CLUSTER1_FREQ_MIN, &exynos_min_cluster1_nb); | |
1390 | pm_qos_add_notifier(PM_QOS_CLUSTER1_FREQ_MAX, &exynos_max_cluster1_nb); | |
1391 | #endif | |
1392 | ||
1393 | ret = cpufreq_register_driver(&exynos_cpufreq_driver); | |
1394 | if (ret) | |
1395 | pr_info("%s: Failed registering platform driver, err: %d\n", | |
1396 | __func__, ret); | |
1397 | ||
1398 | pm_qos_add_request(&boost_qos_min[CL_ZERO] , PM_QOS_CLUSTER0_FREQ_MIN, 0); | |
1399 | exynos_cpufreq_boost_frequency(0, 30000); | |
1400 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1401 | pm_qos_add_request(&boost_qos_min[CL_ONE] , PM_QOS_CLUSTER1_FREQ_MIN, 0); | |
1402 | exynos_cpufreq_boost_frequency(4, 30000); | |
1403 | #endif | |
1404 | ||
1405 | cpu_dev = get_cpu_device(0); | |
1406 | opp = opp_find_freq_exact(cpu_dev, locking_frequency * 1000, true); | |
1407 | locking_volt = opp_get_voltage(opp); | |
1408 | ||
1409 | #ifndef CONFIG_EXYNOS7580_QUAD | |
1410 | register_hotcpu_notifier(&exynos_cpufreq_cpu_up_nb); | |
1411 | register_hotcpu_notifier(&exynos_cpufreq_cpu_down_nb); | |
1412 | #endif | |
1413 | ||
1414 | return ret; | |
1415 | } | |
1416 | ||
/*
 * Platform remove: unregister the cpufreq driver, then drop the MIF bus
 * throughput QoS request added in probe.
 *
 * NOTE(review): the per-cluster min/max QoS requests, notifiers and the
 * sysfs group added in probe are not torn down here - presumably this
 * driver is never unbound in practice; confirm before relying on remove.
 */
static int exynos_smp_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&exynos_cpufreq_driver);

	pm_qos_remove_request(&pm_qos_mif);

	pr_info("%s: Un-registered platform driver\n", __func__);

	return 0;
}
1427 | ||
/* Platform driver matching the device registered by exynos_cpufreq_device_init() */
static struct platform_driver exynos_smp_platdrv = {
	.driver = {
		.name	= "exynos-smp-cpufreq",
		.owner	= THIS_MODULE,
	},
	.probe		= exynos_smp_probe,
	.remove		= exynos_smp_remove,
};
module_platform_driver(exynos_smp_platdrv);

MODULE_AUTHOR("Jonghwan Choi <jhbird.choi@samsung.com>");
MODULE_DESCRIPTION("Exynos SMP cpufreq driver via DT");
MODULE_LICENSE("GPL");