battery: sec_battery: export {CURRENT/VOLTAGE}_MAX to sysfs
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / drivers / thermal / exynos_thermal.c
CommitLineData
9d97e5c8 1/*
c48cbba6 2 * exynos_thermal.c - Samsung EXYNOS TMU (Thermal Management Unit)
9d97e5c8
DK
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * Donggeun Kim <dg77.kim@samsung.com>
c48cbba6 6 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
9d97e5c8
DK
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/err.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/platform_device.h>
29#include <linux/interrupt.h>
3c2a0909 30#include <linux/ipa.h>
9d97e5c8
DK
31#include <linux/workqueue.h>
32#include <linux/sysfs.h>
33#include <linux/kobject.h>
34#include <linux/io.h>
35#include <linux/mutex.h>
c48cbba6 36#include <linux/platform_data/exynos_thermal.h>
7e0b55e6
ADK
37#include <linux/thermal.h>
38#include <linux/cpufreq.h>
39#include <linux/cpu_cooling.h>
f22d9c03 40#include <linux/of.h>
3c2a0909
S
41#include <linux/delay.h>
42#include <linux/suspend.h>
43#include <linux/pm_qos.h>
44#include <linux/exynos-ss.h>
45#include <plat/cpu.h>
46#include <mach/tmu.h>
47#include <mach/cpufreq.h>
48#include <mach/asv-exynos.h>
49#include <mach/exynos-pm.h>
50#include <mach/devfreq.h>
51#include "cal_tmu.h"
52
53#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
54static struct cpumask mp_cluster_cpus[CL_END];
55#endif
f22d9c03 56
3c2a0909
S
57#define CLUST0_POLICY_CORE ((exynos_boot_cluster == CL_ZERO) ? 0 : 4)
58#define CLUST1_POLICY_CORE ((exynos_boot_cluster == CL_ONE) ? 0 : 4)
59#define CS_POLICY_CORE 0
f22d9c03 60
3c2a0909
S
61#if defined(CONFIG_SOC_EXYNOS5430)
62#define CPU_HOTPLUG_IN_TEMP 95
63#define CPU_HOTPLUG_OUT_TEMP 105
64#elif defined(CONFIG_SOC_EXYNOS5422)
65#define CPU_HOTPLUG_IN_TEMP 95
66#define CPU_HOTPLUG_OUT_TEMP 100
67#endif
68
69#ifdef CONFIG_EXYNOS_SWTRIP
70#define SWTRIP_TEMP 110
71#define SWTRIP_NOISE_COUNT 1
bbf63be4 72
3c2a0909
S
73static unsigned int swtrip_counter = 0;
74#endif
75
76static bool is_tmu_probed;
f22d9c03 77
3c2a0909
S
78extern int gpu_is_power_on(void);
79static enum tmu_noti_state_t tmu_old_state = TMU_NORMAL;
80static enum gpu_noti_state_t gpu_old_state = GPU_NORMAL;
81static enum mif_noti_state_t mif_old_state = MIF_TH_LV1;
82static bool is_suspending;
83static bool is_cpu_hotplugged_out;
f22d9c03 84
3c2a0909
S
85static BLOCKING_NOTIFIER_HEAD(exynos_tmu_notifier);
86static BLOCKING_NOTIFIER_HEAD(exynos_gpu_notifier);
7e0b55e6 87
f22d9c03
ADK
88struct exynos_tmu_data {
89 struct exynos_tmu_platform_data *pdata;
3c2a0909
S
90 struct resource *mem[EXYNOS_TMU_COUNT];
91 void __iomem *base[EXYNOS_TMU_COUNT];
92 int irq[EXYNOS_TMU_COUNT];
f22d9c03 93 enum soc_type soc;
9d97e5c8
DK
94 struct work_struct irq_work;
95 struct mutex lock;
3c2a0909 96 struct cal_tmu_data *cal_data;
9d97e5c8
DK
97};
98
7e0b55e6
ADK
99struct thermal_trip_point_conf {
100 int trip_val[MAX_TRIP_COUNT];
101 int trip_count;
4f0a6847 102 u8 trigger_falling;
7e0b55e6
ADK
103};
104
105struct thermal_cooling_conf {
106 struct freq_clip_table freq_data[MAX_TRIP_COUNT];
3c2a0909 107 int size[THERMAL_TRIP_CRITICAL + 1];
7e0b55e6
ADK
108 int freq_clip_count;
109};
110
111struct thermal_sensor_conf {
112 char name[SENSOR_NAME_LEN];
113 int (*read_temperature)(void *data);
bffd1f8a 114 int (*write_emul_temp)(void *drv_data, unsigned long temp);
7e0b55e6
ADK
115 struct thermal_trip_point_conf trip_data;
116 struct thermal_cooling_conf cooling_data;
117 void *private_data;
118};
119
120struct exynos_thermal_zone {
121 enum thermal_device_mode mode;
122 struct thermal_zone_device *therm_dev;
123 struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
124 unsigned int cool_dev_size;
125 struct platform_device *exynos4_dev;
126 struct thermal_sensor_conf *sensor_conf;
127 bool bind;
128};
129
130static struct exynos_thermal_zone *th_zone;
3c2a0909
S
131static struct platform_device *exynos_tmu_pdev;
132static struct exynos_tmu_data *tmudata;
7e0b55e6
ADK
133static void exynos_unregister_thermal(void);
134static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
3c2a0909
S
135static int exynos5_tmu_cpufreq_notifier(struct notifier_block *notifier, unsigned long event, void *v);
136
137static struct notifier_block exynos_cpufreq_nb = {
138 .notifier_call = exynos5_tmu_cpufreq_notifier,
139};
140
141/* For ePOP protection, handle additional thermal condition from MIF notification.*/
142#if defined(CONFIG_ARM_EXYNOS5430_BUS_DEVFREQ) || defined(CONFIG_ARM_EXYNOS5433_BUS_DEVFREQ)
143#define MIF_THERMAL_THRESHOLD 4 /* MR4 state 4: 85~ degree */
144#define MIF_THERMAL_SWTRIP_THRESHOLD_TEMP 100 /* TMU junction temp 100~ degree */
145#define PM_QOS_CPU_FREQ_DEFAULT_VALUE INT_MAX
146#define MIF_THROTTLING1_BIG (1000 * 1000)
147#define MIF_THROTTLING1_LITTLE (PM_QOS_CPU_FREQ_DEFAULT_VALUE)
148#define MIF_THROTTLING2_BIG (800 * 1000)
149#define MIF_THROTTLING2_LITTLE (1000 * 1000)
150
151static bool is_mif_thermal_hotplugged_out;
152static enum mif_thermal_state_t mif_thermal_state = MIF_NORMAL;
153static int mif_thermal_level_ch0, mif_thermal_level_ch1;
154
155static struct pm_qos_request exynos_mif_thermal_cluster1_max_qos;
156static struct pm_qos_request exynos_mif_thermal_cluster0_max_qos;
157#endif
158
159#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
160static void __init init_mp_cpumask_set(void)
161{
162 unsigned int i;
163
164 for_each_cpu(i, cpu_possible_mask) {
165 if (exynos_boot_cluster == CL_ZERO) {
166 if (i >= NR_CLUST0_CPUS)
167 cpumask_set_cpu(i, &mp_cluster_cpus[CL_ONE]);
168 else
169 cpumask_set_cpu(i, &mp_cluster_cpus[CL_ZERO]);
170 } else {
171 if (i >= NR_CLUST1_CPUS)
172 cpumask_set_cpu(i, &mp_cluster_cpus[CL_ZERO]);
173 else
174 cpumask_set_cpu(i, &mp_cluster_cpus[CL_ONE]);
175 }
176 }
177}
178#endif
7e0b55e6
ADK
179
180/* Get mode callback functions for thermal zone */
181static int exynos_get_mode(struct thermal_zone_device *thermal,
182 enum thermal_device_mode *mode)
183{
184 if (th_zone)
185 *mode = th_zone->mode;
186 return 0;
187}
188
189/* Set mode callback functions for thermal zone */
190static int exynos_set_mode(struct thermal_zone_device *thermal,
191 enum thermal_device_mode mode)
192{
193 if (!th_zone->therm_dev) {
194 pr_notice("thermal zone not registered\n");
195 return 0;
196 }
197
7e0b55e6
ADK
198 th_zone->mode = mode;
199 thermal_zone_device_update(th_zone->therm_dev);
7e0b55e6
ADK
200 return 0;
201}
202
203
204/* Get trip type callback functions for thermal zone */
205static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
206 enum thermal_trip_type *type)
207{
3c2a0909
S
208 int active_size, passive_size;
209
210 active_size = th_zone->sensor_conf->cooling_data.size[THERMAL_TRIP_ACTIVE];
211 passive_size = th_zone->sensor_conf->cooling_data.size[THERMAL_TRIP_PASSIVE];
212
213 if (trip < active_size)
7e0b55e6 214 *type = THERMAL_TRIP_ACTIVE;
3c2a0909
S
215 else if (trip >= active_size && trip < active_size + passive_size)
216 *type = THERMAL_TRIP_PASSIVE;
217 else if (trip >= active_size + passive_size)
7e0b55e6 218 *type = THERMAL_TRIP_CRITICAL;
3c2a0909 219 else
7e0b55e6 220 return -EINVAL;
3c2a0909 221
7e0b55e6
ADK
222 return 0;
223}
224
225/* Get trip temperature callback functions for thermal zone */
226static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
227 unsigned long *temp)
228{
3c2a0909
S
229 int active_size, passive_size;
230
231 active_size = th_zone->sensor_conf->cooling_data.size[THERMAL_TRIP_ACTIVE];
232 passive_size = th_zone->sensor_conf->cooling_data.size[THERMAL_TRIP_PASSIVE];
233
234 if (trip < 0 || trip > active_size + passive_size)
7e0b55e6
ADK
235 return -EINVAL;
236
237 *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
238 /* convert the temperature into millicelsius */
239 *temp = *temp * MCELSIUS;
240
241 return 0;
242}
243
244/* Get critical temperature callback functions for thermal zone */
245static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
246 unsigned long *temp)
247{
248 int ret;
3c2a0909
S
249 int active_size, passive_size;
250
251 active_size = th_zone->sensor_conf->cooling_data.size[THERMAL_TRIP_ACTIVE];
252 passive_size = th_zone->sensor_conf->cooling_data.size[THERMAL_TRIP_PASSIVE];
253
7e0b55e6 254 /* Panic zone */
3c2a0909 255 ret = exynos_get_trip_temp(thermal, active_size + passive_size, temp);
7e0b55e6
ADK
256 return ret;
257}
258
7e0b55e6
ADK
/*
 * Bind callback for the thermal zone: attach @cdev (one of the cpufreq
 * cooling devices this driver registered) to every active/passive trip
 * point.  On MP (two-cluster) builds the matching cooling-device index
 * identifies the cluster, and each trip's clip frequency is clamped into
 * that cluster's current cpufreq policy range before being translated
 * to a cooling level.
 */
static int exynos_bind(struct thermal_zone_device *thermal,
			struct thermal_cooling_device *cdev)
{
	int ret = 0, i, tab_size;
	unsigned long level = THERMAL_CSTATE_INVALID;
#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	int cluster_idx = 0;
	struct cpufreq_policy policy;
#endif
	struct freq_clip_table *tab_ptr, *clip_data;
	struct thermal_sensor_conf *data = th_zone->sensor_conf;
	enum thermal_trip_type type = 0;

	tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
	tab_size = data->cooling_data.freq_clip_count;

	if (tab_ptr == NULL || tab_size == 0)
		return -EINVAL;

	/* find the cooling device registered*/
	for (i = 0; i < th_zone->cool_dev_size; i++)
		if (cdev == th_zone->cool_dev[i]) {
#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
			/* cooling devices are registered per cluster, in order */
			cluster_idx = i;
#endif
			break;
		}

	/* No matching cooling device */
	if (i == th_zone->cool_dev_size)
		return 0;

	/* Bind the thermal zone to the cpufreq cooling device */
	for (i = 0; i < tab_size; i++) {
		clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
		if (cluster_idx == CL_ZERO) {
			cpufreq_get_policy(&policy, CLUST0_POLICY_CORE);

			/* Clamp the clip frequency into the live policy range. */
			if (clip_data->freq_clip_max_cluster0 > policy.max) {
				pr_warn("%s: throttling freq(%d) is greater than policy max(%d)\n", __func__, clip_data->freq_clip_max_cluster0, policy.max);
				clip_data->freq_clip_max_cluster0 = policy.max;
			} else if (clip_data->freq_clip_max_cluster0 < policy.min) {
				pr_warn("%s: throttling freq(%d) is less than policy min(%d)\n", __func__, clip_data->freq_clip_max_cluster0, policy.min);
				clip_data->freq_clip_max_cluster0 = policy.min;
			}

			level = cpufreq_cooling_get_level(CLUST0_POLICY_CORE, clip_data->freq_clip_max_cluster0);
		} else if (cluster_idx == CL_ONE) {
			cpufreq_get_policy(&policy, CLUST1_POLICY_CORE);

			/* Same clamping for the other cluster's clip field. */
			if (clip_data->freq_clip_max > policy.max) {
				pr_warn("%s: throttling freq(%d) is greater than policy max(%d)\n", __func__, clip_data->freq_clip_max, policy.max);
				clip_data->freq_clip_max = policy.max;
			} else if (clip_data->freq_clip_max < policy.min) {
				pr_warn("%s: throttling freq(%d) is less than policy min(%d)\n", __func__, clip_data->freq_clip_max, policy.min);
				clip_data->freq_clip_max = policy.min;
			}

			level = cpufreq_cooling_get_level(CLUST1_POLICY_CORE, clip_data->freq_clip_max);
		}
#else
		level = cpufreq_cooling_get_level(CS_POLICY_CORE, clip_data->freq_clip_max);
#endif
		/* No valid cooling level: give up and disable cooling. */
		if (level == THERMAL_CSTATE_INVALID) {
			thermal->cooling_dev_en = false;
			return 0;
		}
		exynos_get_trip_type(th_zone->therm_dev, i, &type);
		switch (type) {
		case THERMAL_TRIP_ACTIVE:
		case THERMAL_TRIP_PASSIVE:
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
								level, 0)) {
				pr_err("error binding cdev inst %d\n", i);
				thermal->cooling_dev_en = false;
				ret = -EINVAL;
			}
			th_zone->bind = true;
			break;
		default:
			/* Critical trips are never bound to cooling devices. */
			ret = -EINVAL;
		}
	}

	return ret;
}
347
348/* Unbind callback functions for thermal zone */
349static int exynos_unbind(struct thermal_zone_device *thermal,
350 struct thermal_cooling_device *cdev)
351{
352 int ret = 0, i, tab_size;
353 struct thermal_sensor_conf *data = th_zone->sensor_conf;
3c2a0909 354 enum thermal_trip_type type = 0;
7e0b55e6
ADK
355
356 if (th_zone->bind == false)
357 return 0;
358
359 tab_size = data->cooling_data.freq_clip_count;
360
361 if (tab_size == 0)
362 return -EINVAL;
363
364 /* find the cooling device registered*/
365 for (i = 0; i < th_zone->cool_dev_size; i++)
366 if (cdev == th_zone->cool_dev[i])
367 break;
368
369 /* No matching cooling device */
370 if (i == th_zone->cool_dev_size)
371 return 0;
372
373 /* Bind the thermal zone to the cpufreq cooling device */
374 for (i = 0; i < tab_size; i++) {
3c2a0909
S
375 exynos_get_trip_type(th_zone->therm_dev, i, &type);
376 switch (type) {
377 case THERMAL_TRIP_ACTIVE:
378 case THERMAL_TRIP_PASSIVE:
7e0b55e6
ADK
379 if (thermal_zone_unbind_cooling_device(thermal, i,
380 cdev)) {
381 pr_err("error unbinding cdev inst=%d\n", i);
382 ret = -EINVAL;
383 }
384 th_zone->bind = false;
385 break;
386 default:
387 ret = -EINVAL;
388 }
389 }
390 return ret;
391}
392
3c2a0909
S
393
/* Register @n on the TMU thermal-state notifier chain. */
int exynos_tmu_add_notifier(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&exynos_tmu_notifier, n);
}
398
/*
 * Publish a TMU state change on the exynos_tmu_notifier chain.  While
 * the system is suspending the state is forced to TMU_COLD.  Only
 * transitions are reported (no-op when the state is unchanged).
 */
void exynos_tmu_call_notifier(enum tmu_noti_state_t cur_state, int temp)
{
	if (is_suspending)
		cur_state = TMU_COLD;

	if (cur_state != tmu_old_state) {
		/*
		 * Entering COLD, or leaving COLD for NORMAL, is delivered
		 * as a TMU_COLD event carrying the new state; every other
		 * transition passes the new state with the old state as
		 * the notifier argument.
		 */
		if ((cur_state == TMU_COLD) ||
			((cur_state == TMU_NORMAL) && (tmu_old_state == TMU_COLD)))
			blocking_notifier_call_chain(&exynos_tmu_notifier, TMU_COLD, &cur_state);
		else
			blocking_notifier_call_chain(&exynos_tmu_notifier, cur_state, &tmu_old_state);
		if (cur_state == TMU_COLD)
			pr_info("tmu temperature state %d to %d\n", tmu_old_state, cur_state);
		else
			pr_info("tmu temperature state %d to %d, cur_temp : %d\n", tmu_old_state, cur_state, temp);
		tmu_old_state = cur_state;
	}
}
417
/* Register @n on the GPU thermal-state notifier chain. */
int exynos_gpu_add_notifier(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&exynos_gpu_notifier, n);
}
422
423void exynos_gpu_call_notifier(enum gpu_noti_state_t cur_state)
424{
425 if (is_suspending)
426 cur_state = GPU_COLD;
427
428 if (cur_state != gpu_old_state) {
429 pr_info("gpu temperature state %d to %d\n", gpu_old_state, cur_state);
430 blocking_notifier_call_chain(&exynos_gpu_notifier, cur_state, &cur_state);
431 gpu_old_state = cur_state;
432 }
433}
434
435static void exynos_check_tmu_noti_state(int temp)
436{
437 enum tmu_noti_state_t cur_state;
438
439 /* check current temperature state */
440 if (temp > HOT_CRITICAL_TEMP)
441 cur_state = TMU_CRITICAL;
442 else if (temp > HOT_NORMAL_TEMP && temp <= HOT_CRITICAL_TEMP)
443 cur_state = TMU_HOT;
444 else if (temp > COLD_TEMP && temp <= HOT_NORMAL_TEMP)
445 cur_state = TMU_NORMAL;
446 else
447 cur_state = TMU_COLD;
448
449 exynos_tmu_call_notifier(cur_state, temp);
450}
451
452static void exynos_check_mif_noti_state(int temp)
453{
454 enum mif_noti_state_t cur_state;
455
456 /* check current temperature state */
457 if (temp < MIF_TH_TEMP1)
458 cur_state = MIF_TH_LV1;
459 else if (temp >= MIF_TH_TEMP1 && temp < MIF_TH_TEMP2)
460 cur_state = MIF_TH_LV2;
461 else
462 cur_state = MIF_TH_LV3;
463
464 if (cur_state != mif_old_state) {
465#ifdef CONFIG_SOC_EXYNOS5422
466 pr_info("mif temperature state %d to %d\n", mif_old_state, cur_state);
467#endif
468 blocking_notifier_call_chain(&exynos_tmu_notifier, cur_state, &mif_old_state);
469 mif_old_state = cur_state;
470 }
471}
472
/*
 * Classify the GPU temperature (celsius) into a gpu_noti_state_t and
 * publish it via exynos_gpu_call_notifier().  On 5430/5433 bus-devfreq
 * builds the result is additionally floored by the current MIF thermal
 * state so memory (ePOP) throttling can force extra GPU throttling.
 */
static void exynos_check_gpu_noti_state(int temp)
{
	enum gpu_noti_state_t cur_state;
#if defined(CONFIG_ARM_EXYNOS5430_BUS_DEVFREQ) || defined(CONFIG_ARM_EXYNOS5433_BUS_DEVFREQ)
	enum gpu_noti_state_t mif_thermal_gpu_state = GPU_NORMAL;
#endif

	/* check current temperature state */
	if (temp >= GPU_TH_TEMP5)
		cur_state = GPU_TRIPPING;
	else if (temp >= GPU_TH_TEMP4 && temp < GPU_TH_TEMP5)
		cur_state = GPU_THROTTLING4;
	else if (temp >= GPU_TH_TEMP3 && temp < GPU_TH_TEMP4)
		cur_state = GPU_THROTTLING3;
	else if (temp >= GPU_TH_TEMP2 && temp < GPU_TH_TEMP3)
		cur_state = GPU_THROTTLING2;
	else if (temp >= GPU_TH_TEMP1 && temp < GPU_TH_TEMP2)
		cur_state = GPU_THROTTLING1;
	else if (temp > COLD_TEMP && temp < GPU_TH_TEMP1)
		cur_state = GPU_NORMAL;
	else
		cur_state = GPU_COLD;

#if defined(CONFIG_ARM_EXYNOS5430_BUS_DEVFREQ) || defined(CONFIG_ARM_EXYNOS5433_BUS_DEVFREQ)
	/* Map the MIF thermal state onto a minimum GPU throttle level. */
	switch (mif_thermal_state) {
	case MIF_NORMAL:
		mif_thermal_gpu_state = GPU_NORMAL;
		break;
	case MIF_THROTTLING1:
#if defined(CONFIG_ARM_EXYNOS5430_BUS_DEVFREQ)
		mif_thermal_gpu_state = GPU_THROTTLING3;
#else
		mif_thermal_gpu_state = GPU_THROTTLING4;
#endif
		break;
	case MIF_THROTTLING2:
	case MIF_TRIPPING:
#if defined(CONFIG_ARM_EXYNOS5430_BUS_DEVFREQ)
		mif_thermal_gpu_state = GPU_THROTTLING4;
#else
		mif_thermal_gpu_state = GPU_TRIPPING;
#endif
		break;
	}

	/*
	 * Report whichever demand is more severe; assumes gpu_noti_state_t
	 * values increase with severity — TODO confirm against mach/tmu.h.
	 */
	cur_state = max(cur_state, mif_thermal_gpu_state);
#endif

	exynos_gpu_call_notifier(cur_state);
}
523
7e0b55e6
ADK
524/* Get temperature callback functions for thermal zone */
525static int exynos_get_temp(struct thermal_zone_device *thermal,
526 unsigned long *temp)
527{
528 void *data;
529
530 if (!th_zone->sensor_conf) {
531 pr_info("Temperature sensor not initialised\n");
532 return -EINVAL;
533 }
534 data = th_zone->sensor_conf->private_data;
535 *temp = th_zone->sensor_conf->read_temperature(data);
536 /* convert the temperature into millicelsius */
537 *temp = *temp * MCELSIUS;
538 return 0;
539}
540
bffd1f8a
ADK
541/* Get temperature callback functions for thermal zone */
542static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
543 unsigned long temp)
544{
545 void *data;
546 int ret = -EINVAL;
547
548 if (!th_zone->sensor_conf) {
549 pr_info("Temperature sensor not initialised\n");
550 return -EINVAL;
551 }
552 data = th_zone->sensor_conf->private_data;
553 if (th_zone->sensor_conf->write_emul_temp)
554 ret = th_zone->sensor_conf->write_emul_temp(data, temp);
555 return ret;
556}
557
7e0b55e6
ADK
558/* Get the temperature trend */
559static int exynos_get_trend(struct thermal_zone_device *thermal,
560 int trip, enum thermal_trend *trend)
561{
3ad9524a
ADK
562 int ret;
563 unsigned long trip_temp;
564
565 ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
566 if (ret < 0)
567 return ret;
568
569 if (thermal->temperature >= trip_temp)
ce760ed3 570 *trend = THERMAL_TREND_RAISE_FULL;
7e0b55e6 571 else
ce760ed3 572 *trend = THERMAL_TREND_DROP_FULL;
7e0b55e6
ADK
573
574 return 0;
575}
3c2a0909
S
576
#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5422) || defined(CONFIG_SOC_EXYNOS5433)
/*
 * Hotplug cluster1 cores out above pdata->hotplug_out_threshold and
 * back in below pdata->hotplug_in_threshold.  Hysteresis comes from the
 * two thresholds plus the is_cpu_hotplugged_out latch.
 * NOTE(review): __ref suppresses section-mismatch warnings, presumably
 * because cluster1_cores_hotplug() reaches cpu-hotplug code — confirm.
 */
static int __ref exynos_throttle_cpu_hotplug(struct thermal_zone_device *thermal)
{
	int ret = 0;
	int cur_temp = 0;
	struct exynos_tmu_data *data = th_zone->sensor_conf->private_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;

	/* A zero temperature means the zone has not been read yet. */
	if (!thermal->temperature)
		return -EINVAL;

	/* thermal->temperature is millicelsius; thresholds are celsius. */
	cur_temp = thermal->temperature / MCELSIUS;

	if (is_cpu_hotplugged_out) {
		if (cur_temp < pdata->hotplug_in_threshold) {
			/*
			 * If current temperature is lower than low threshold,
			 * call cluster1_cores_hotplug(false) for hotplugged out cpus.
			 */
			ret = cluster1_cores_hotplug(false);
			if (ret)
				pr_err("%s: failed cluster1 cores hotplug in\n",
							__func__);
			else
				is_cpu_hotplugged_out = false;
		}
	} else {
		if (cur_temp >= pdata->hotplug_out_threshold) {
			/*
			 * If current temperature is higher than high threshold,
			 * call cluster1_cores_hotplug(true) to hold temperature down.
			 */
			ret = cluster1_cores_hotplug(true);
			if (ret)
				pr_err("%s: failed cluster1 cores hotplug out\n",
							__func__);
			else
				is_cpu_hotplugged_out = true;
		}
	}

	return ret;
}
#endif
621
7e0b55e6 622/* Operation callback functions for thermal zone */
3c2a0909 623static struct thermal_zone_device_ops exynos_dev_ops = {
7e0b55e6
ADK
624 .bind = exynos_bind,
625 .unbind = exynos_unbind,
626 .get_temp = exynos_get_temp,
bffd1f8a 627 .set_emul_temp = exynos_set_emul_temp,
7e0b55e6
ADK
628 .get_trend = exynos_get_trend,
629 .get_mode = exynos_get_mode,
630 .set_mode = exynos_set_mode,
631 .get_trip_type = exynos_get_trip_type,
632 .get_trip_temp = exynos_get_trip_temp,
633 .get_crit_temp = exynos_get_crit_temp,
3c2a0909
S
634#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5422) || defined(CONFIG_SOC_EXYNOS5433)
635 .throttle_cpu_hotplug = exynos_throttle_cpu_hotplug,
636#endif
7e0b55e6
ADK
637};
638
/*
 * This function may be called from interrupt based temperature sensor
 * when threshold is changed.  It (re)binds cooling devices if needed,
 * forces a zone update, adjusts the polling interval, and reports the
 * crossed trip index to userspace via a change uevent.
 */
static void exynos_report_trigger(void)
{
	unsigned int i;
	char data[10];
	char *envp[] = { data, NULL };
	enum thermal_trip_type type = 0;

	if (!th_zone || !th_zone->therm_dev)
		return;
	/* Lazily bind cooling devices if registration-time binding failed. */
	if (th_zone->bind == false) {
		for (i = 0; i < th_zone->cool_dev_size; i++) {
			if (!th_zone->cool_dev[i])
				continue;
			exynos_bind(th_zone->therm_dev,
					th_zone->cool_dev[i]);
		}
	}

	thermal_zone_device_update(th_zone->therm_dev);

	mutex_lock(&th_zone->therm_dev->lock);
	/* Find the level for which trip happened */
	for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
		if (th_zone->therm_dev->last_temperature <
			th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
			break;
	}

	/* Poll faster while sitting in an active trip band. */
	if (th_zone->mode == THERMAL_DEVICE_ENABLED) {
		exynos_get_trip_type(th_zone->therm_dev, i, &type);
		if (type == THERMAL_TRIP_ACTIVE)
			th_zone->therm_dev->passive_delay = ACTIVE_INTERVAL;
		else
			th_zone->therm_dev->passive_delay = PASSIVE_INTERVAL;
	}

	mutex_unlock(&th_zone->therm_dev->lock);

	/* Report the crossed trip index to userspace. */
	snprintf(data, sizeof(data), "%u", i);
	kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
}
684
/*
 * Register with the in-kernel thermal management: allocate the zone
 * state, register cpufreq cooling device(s), then register the thermal
 * zone device itself.  On any failure everything registered so far is
 * torn down via exynos_unregister_thermal().
 */
static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
{
	int ret, count = 0;
#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	int i, j;
#endif
	struct cpumask mask_val;

	if (!sensor_conf || !sensor_conf->read_temperature) {
		pr_err("Temperature sensor not initialised\n");
		return -EINVAL;
	}

	th_zone = kzalloc(sizeof(struct exynos_thermal_zone), GFP_KERNEL);
	if (!th_zone)
		return -ENOMEM;

	th_zone->sensor_conf = sensor_conf;
	/* Non-MP builds cool via CPU0's policy only. */
	cpumask_clear(&mask_val);
	cpumask_set_cpu(0, &mask_val);

#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	/*
	 * One cooling device per cluster per zone.
	 * NOTE(review): the inner loop indexes mp_cluster_cpus[count], not
	 * mp_cluster_cpus[j]; if EXYNOS_ZONE_COUNT > 1, 'count' would run
	 * past CL_END.  Looks safe only when EXYNOS_ZONE_COUNT == 1 —
	 * confirm before changing either constant.
	 */
	for (i = 0; i < EXYNOS_ZONE_COUNT; i++) {
		for (j = 0; j < CL_END; j++) {
			th_zone->cool_dev[count] = cpufreq_cooling_register(&mp_cluster_cpus[count]);
			if (IS_ERR(th_zone->cool_dev[count])) {
				pr_err("Failed to register cpufreq cooling device\n");
				ret = -EINVAL;
				th_zone->cool_dev_size = count;
				goto err_unregister;
			}
			count++;
		}
	}
#else
	for (count = 0; count < EXYNOS_ZONE_COUNT; count++) {
		th_zone->cool_dev[count] = cpufreq_cooling_register(&mask_val);
		if (IS_ERR(th_zone->cool_dev[count])) {
			pr_err("Failed to register cpufreq cooling device\n");
			ret = -EINVAL;
			th_zone->cool_dev_size = count;
			goto err_unregister;
		}
	}
#endif
	th_zone->cool_dev_size = count;

	th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
			th_zone->sensor_conf->trip_data.trip_count, 0, NULL, &exynos_dev_ops, NULL, PASSIVE_INTERVAL,
			IDLE_INTERVAL);

	if (IS_ERR(th_zone->therm_dev)) {
		pr_err("Failed to register thermal zone device\n");
		ret = PTR_ERR(th_zone->therm_dev);
		goto err_unregister;
	}
	th_zone->mode = THERMAL_DEVICE_ENABLED;

	pr_info("Exynos: Kernel Thermal management registered\n");

	return 0;

err_unregister:
	exynos_unregister_thermal();
	return ret;
}
752
753/* Un-Register with the in-kernel thermal management */
754static void exynos_unregister_thermal(void)
755{
756 int i;
757
c072fed9
SK
758 if (!th_zone)
759 return;
760
761 if (th_zone->therm_dev)
7e0b55e6
ADK
762 thermal_zone_device_unregister(th_zone->therm_dev);
763
764 for (i = 0; i < th_zone->cool_dev_size; i++) {
c072fed9 765 if (th_zone->cool_dev[i])
7e0b55e6
ADK
766 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
767 }
768
769 kfree(th_zone);
770 pr_info("Exynos: Kernel Thermal management unregistered\n");
771}
772
3c2a0909 773static int exynos_tmu_initialize(struct platform_device *pdev, int id)
9d97e5c8 774{
f22d9c03
ADK
775 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
776 struct exynos_tmu_platform_data *pdata = data->pdata;
3c2a0909 777 unsigned int status;
4f0a6847 778 unsigned int rising_threshold = 0, falling_threshold = 0;
3c2a0909
S
779#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
780 unsigned int rising_threshold7_4 = 0, falling_threshold7_4 = 0;
781#endif
4f0a6847 782 int ret = 0, threshold_code, i, trigger_levs = 0;
3c2a0909 783 int timeout = 20000;
9d97e5c8
DK
784
785 mutex_lock(&data->lock);
9d97e5c8 786
3c2a0909
S
787 while(1) {
788 status = readb(data->base[id] + EXYNOS_TMU_REG_STATUS);
789 if (status)
790 break;
9d97e5c8 791
3c2a0909
S
792 timeout--;
793 if (!timeout) {
794 pr_err("%s: timeout TMU busy\n", __func__);
795 ret = -EBUSY;
796 goto out;
797 }
f22d9c03 798
3c2a0909
S
799 cpu_relax();
800 usleep_range(1, 2);
801 };
f22d9c03 802
4f0a6847
JL
803 /* Count trigger levels to be enabled */
804 for (i = 0; i < MAX_THRESHOLD_LEVS; i++)
805 if (pdata->trigger_levels[i])
806 trigger_levs++;
807
f22d9c03
ADK
808 if (data->soc == SOC_ARCH_EXYNOS4210) {
809 /* Write temperature code for threshold */
3c2a0909 810 threshold_code = cal_tmu_temp_to_code(data->cal_data, pdata->threshold, 0);
f22d9c03
ADK
811 if (threshold_code < 0) {
812 ret = threshold_code;
813 goto out;
814 }
815 writeb(threshold_code,
3c2a0909 816 data->base[0] + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
4f0a6847
JL
817 for (i = 0; i < trigger_levs; i++)
818 writeb(pdata->trigger_levels[i],
3c2a0909 819 data->base[0] + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
f22d9c03
ADK
820
821 writel(EXYNOS4210_TMU_INTCLEAR_VAL,
3c2a0909 822 data->base[i] + EXYNOS_TMU_REG_INTCLEAR);
f22d9c03 823 } else if (data->soc == SOC_ARCH_EXYNOS) {
4f0a6847
JL
824 /* Write temperature code for rising and falling threshold */
825 for (i = 0; i < trigger_levs; i++) {
3c2a0909
S
826 threshold_code = cal_tmu_temp_to_code(data->cal_data,
827 pdata->trigger_levels[i], id);
4f0a6847
JL
828 if (threshold_code < 0) {
829 ret = threshold_code;
830 goto out;
831 }
832 rising_threshold |= threshold_code << 8 * i;
833 if (pdata->threshold_falling) {
3c2a0909 834 threshold_code = cal_tmu_temp_to_code(data->cal_data,
4f0a6847 835 pdata->trigger_levels[i] -
3c2a0909 836 pdata->threshold_falling, id);
4f0a6847
JL
837 if (threshold_code > 0)
838 falling_threshold |=
839 threshold_code << 8 * i;
840 }
f22d9c03 841 }
f22d9c03 842
3c2a0909
S
843 writel(rising_threshold, data->base[id] + EXYNOS_THD_TEMP_RISE);
844 writel(falling_threshold, data->base[id] + EXYNOS_THD_TEMP_FALL);
845 writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT, data->base[id] + EXYNOS_TMU_REG_INTCLEAR);
846 } else if (data->soc == SOC_ARCH_EXYNOS543X) {
847#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
848 for (i = 0; i < trigger_levs; i++) {
849 threshold_code = cal_tmu_temp_to_code(data->cal_data,
850 pdata->trigger_levels[i], id);
851 if (threshold_code < 0) {
852 ret = threshold_code;
853 goto out;
854 }
855 if (i < 4)
856 rising_threshold |= threshold_code << (8 * i);
857 else
858 rising_threshold7_4 |= threshold_code << (8 * (i - 4));
859 if (pdata->threshold_falling) {
860 threshold_code = cal_tmu_temp_to_code(data->cal_data,
861 pdata->trigger_levels[i] -
862 pdata->threshold_falling, id);
863 if (threshold_code > 0) {
864 if (i < 4)
865 falling_threshold |= threshold_code << (8 * i);
866 else
867 falling_threshold7_4 |= threshold_code << (8 * (i - 4));
868 }
869 }
870 }
f22d9c03 871 writel(rising_threshold,
3c2a0909
S
872 data->base[id] + EXYNOS_THD_TEMP_RISE3_0);
873 writel(rising_threshold7_4,
874 data->base[id] + EXYNOS_THD_TEMP_RISE7_4);
4f0a6847 875 writel(falling_threshold,
3c2a0909
S
876 data->base[id] + EXYNOS_THD_TEMP_FALL3_0);
877 writel(falling_threshold7_4,
878 data->base[id] + EXYNOS_THD_TEMP_FALL7_4);
4f0a6847 879 writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
3c2a0909
S
880 data->base[id] + EXYNOS_TMU_REG_INTCLEAR);
881
882 /* Adjuest sampling interval default -> 1ms */
883 /* W/A for WTSR */
884 writel(0xE10, data->base[id] + EXYNOS_TMU_REG_SAMPLING_INTERVAL);
885#endif
9d97e5c8 886 }
9d97e5c8 887out:
9d97e5c8
DK
888 mutex_unlock(&data->lock);
889
890 return ret;
891}
892
3c2a0909 893static void exynos_tmu_get_efuse(struct platform_device *pdev, int id)
9d97e5c8 894{
f22d9c03
ADK
895 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
896 struct exynos_tmu_platform_data *pdata = data->pdata;
3c2a0909
S
897 unsigned int trim_info;
898 int timeout = 5;
9d97e5c8
DK
899
900 mutex_lock(&data->lock);
f22d9c03
ADK
901
902 if (data->soc == SOC_ARCH_EXYNOS) {
3c2a0909
S
903 __raw_writel(EXYNOS_TRIMINFO_RELOAD1,
904 data->base[id] + EXYNOS_TRIMINFO_CONFIG);
905 __raw_writel(EXYNOS_TRIMINFO_RELOAD2,
906 data->base[id] + EXYNOS_TRIMINFO_CONTROL);
907 while (readl(data->base[id] + EXYNOS_TRIMINFO_CONTROL) & EXYNOS_TRIMINFO_RELOAD1) {
908 if (!timeout) {
909 pr_err("Thermal TRIMINFO register reload failed\n");
910 break;
911 }
912 timeout--;
913 cpu_relax();
914 usleep_range(5, 10);
915 }
f22d9c03
ADK
916 }
917
3c2a0909
S
918 /* Save trimming info in order to perform calibration */
919 trim_info = readl(data->base[id] + EXYNOS_TMU_REG_TRIMINFO);
920#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
921 if (trim_info & CALIB_SEL_MASK)
922 pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
923 else
924 pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
925
926 data->cal_data->cal_type = pdata->cal_type;
927 data->cal_data->vptat[id] = (trim_info & VPTAT_CTRL_MASK) >> VPTAT_CTRL_SHIFT;
928#endif
929 data->cal_data->temp_error1[id] = trim_info & EXYNOS_TMU_TRIM_TEMP_MASK;
930 data->cal_data->temp_error2[id] = ((trim_info >> 8) & EXYNOS_TMU_TRIM_TEMP_MASK);
931
932#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5422) || defined(CONFIG_SOC_EXYNOS5433)
933 if (data->cal_data->temp_error1[id] == 0)
934 data->cal_data->temp_error1[id] = pdata->efuse_value;
935#else
936 if ((EFUSE_MIN_VALUE > data->cal_data->temp_error1[id]) || (data->cal_data->temp_error1[id] > EFUSE_MAX_VALUE) ||
937 (data->cal_data->temp_error1[id] == 0))
938 data->cal_data->temp_error1[id] = pdata->efuse_value;
939#endif
940
941 mutex_unlock(&data->lock);
942}
943
/* Enable or disable TMU instance @id under the driver lock. */
static void exynos_tmu_control(struct platform_device *pdev, int id, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	mutex_lock(&data->lock);

	/* The actual register sequencing lives in the cal layer. */
	cal_tmu_control(data->cal_data, id, on);

	mutex_unlock(&data->lock);
}
954
/*
 * exynos_tmu_read() - sample every TMU channel and fan out notifications.
 *
 * Reads all EXYNOS_TMU_COUNT channels under the driver mutex.  The GPU
 * channel (EXYNOS_GPU_NUMBER) is tracked separately; when the GPU domain
 * is powered down on 5430/5433 its reading is forced to COLD_TEMP + 1 so
 * it cannot trigger cold handling.  CPU/MIF/GPU notifier chains are run
 * with the respective maxima, and (with CONFIG_EXYNOS_SWTRIP) a sustained
 * over-SWTRIP_TEMP reading raises a TMUSTATE=3 uevent as a software trip.
 *
 * Returns the maximum of the non-GPU channel temperatures (degrees C).
 */
f22d9c03 955static int exynos_tmu_read(struct exynos_tmu_data *data)
9d97e5c8 956{
3c2a0909
S
 957 int temp, i, max = INT_MIN, min = INT_MAX, gpu_temp = 0;
 958 int alltemp[EXYNOS_TMU_COUNT] = {0, };
 959#ifdef CONFIG_EXYNOS_SWTRIP
 960 char tmustate_string[20];
 961 char *envp[2];
 962#endif
9d97e5c8
DK
 963
 964 mutex_lock(&data->lock);
9d97e5c8 965
3c2a0909
S
 966 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
 967 temp = cal_tmu_read(data->cal_data, i);
 968 alltemp[i] = temp;
 969
 970 if (i == EXYNOS_GPU_NUMBER) {
/* GPU powered off: reading is invalid, substitute just-above-cold. */
 971 if ((soc_is_exynos5430() || soc_is_exynos5433()) && !gpu_is_power_on())
 972 temp = COLD_TEMP + 1;
 973 gpu_temp = temp;
 974 } else {
 975 if (temp > max)
 976 max = temp;
 977 if (temp < min)
 978 min = temp;
 979 }
 980
 981 }
/* Overall hottest reading including the GPU channel (kernel max()). */
 982 temp = max(max, gpu_temp);
 983
 984 exynos_ss_printk("[TMU]%d, %d, %d, %d, %d\n",
 985 alltemp[0], alltemp[1], alltemp[2], alltemp[3], alltemp[4]);
 986#ifdef CONFIG_EXYNOS_SWTRIP
/* Debounce: require SWTRIP_NOISE_COUNT consecutive hot samples. */
 987 if (max >= SWTRIP_TEMP)
 988 swtrip_counter++;
 989 else
 990 swtrip_counter = 0;
 991
 992#if defined(CONFIG_ARM_EXYNOS5430_BUS_DEVFREQ) || defined(CONFIG_ARM_EXYNOS5433_BUS_DEVFREQ)
 993 if (swtrip_counter >= SWTRIP_NOISE_COUNT || (mif_thermal_state >= MIF_THROTTLING2 && (temp > MIF_THERMAL_SWTRIP_THRESHOLD_TEMP))) {
 994 pr_err("[TMU] SW trip for protecting NAND: MIF_STATE(%d), temp(%d)\n", mif_thermal_state, temp);
 995 mif_thermal_state = MIF_TRIPPING;
 996#else
 997 if (swtrip_counter >= SWTRIP_NOISE_COUNT) {
 998#endif
/* NOTE: the two #if branches above open the same block; it closes below. */
 999 snprintf(tmustate_string, sizeof(tmustate_string), "TMUSTATE=%d", 3);
1000 envp[0] = tmustate_string;
1001 envp[1] = NULL;
1002 pr_err("[TMU] SW trip by reaching trip temp(%d)!\n", SWTRIP_TEMP);
1003 if (th_zone && th_zone->therm_dev)
1004 kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
1005 else
1006 pr_err("[TMU] do not call kobject_uevent_env() because th_zone not initialised\n");
1007 }
1008#endif
1009
1010 exynos_check_tmu_noti_state(max);
1011 exynos_check_mif_noti_state(max);
1012 exynos_check_gpu_noti_state(gpu_temp);
9d97e5c8 1013
9d97e5c8 1014 mutex_unlock(&data->lock);
3c2a0909
S
1015#if defined(CONFIG_CPU_THERMAL_IPA)
1016 check_switch_ipa_on(max);
1017#endif
1018 pr_debug("[TMU] TMU0 = %d, TMU1 = %d, TMU2 = %d, TMU3 = %d, TMU4 = %d MAX = %d, GPU = %d\n",
1019 alltemp[0], alltemp[1], alltemp[2], alltemp[3], alltemp[4], max, gpu_temp);
1020
1021 return max;
1022}
9d97e5c8 1023
3c2a0909
S
1024#if defined(CONFIG_CPU_THERMAL_IPA)
/*
 * ipa_hotplug() - IPA callback: hotplug cluster1 cores out (true) or
 * back in (false).  Thin wrapper over cluster1_cores_hotplug().
 */
1025int ipa_hotplug(bool removecores)
1026{
1027 return cluster1_cores_hotplug(removecores);
9d97e5c8 1028}
3c2a0909
S
1029#endif
1030
9d97e5c8 1031
bffd1f8a
ADK
1032#ifdef CONFIG_THERMAL_EMULATION
1033static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
1034{
1035 struct exynos_tmu_data *data = drv_data;
1036 unsigned int reg;
1037 int ret = -EINVAL;
3c2a0909 1038 int i;
bffd1f8a
ADK
1039
1040 if (data->soc == SOC_ARCH_EXYNOS4210)
1041 goto out;
1042
1043 if (temp && temp < MCELSIUS)
1044 goto out;
1045
1046 mutex_lock(&data->lock);
bffd1f8a 1047
3c2a0909 1048 if (temp)
bffd1f8a
ADK
1049 temp /= MCELSIUS;
1050
3c2a0909
S
1051 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
1052 reg = readl(data->base[i] + EXYNOS_EMUL_CON);
1053
1054 if (temp) {
1055 reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
1056 (cal_tmu_temp_to_code(data->cal_data, temp, i)
1057 << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
1058 } else {
1059 reg &= ~EXYNOS_EMUL_ENABLE;
1060 }
bffd1f8a 1061
3c2a0909
S
1062 writel(reg, data->base[i] + EXYNOS_EMUL_CON);
1063 }
bffd1f8a 1064
bffd1f8a
ADK
1065 mutex_unlock(&data->lock);
1066 return 0;
1067out:
1068 return ret;
1069}
1070#else
1071static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
1072 { return -EINVAL; }
1073#endif/*CONFIG_THERMAL_EMULATION*/
1074
f22d9c03 1075static void exynos_tmu_work(struct work_struct *work)
9d97e5c8 1076{
f22d9c03
ADK
1077 struct exynos_tmu_data *data = container_of(work,
1078 struct exynos_tmu_data, irq_work);
3c2a0909 1079 int i;
9d97e5c8
DK
1080
1081 mutex_lock(&data->lock);
3c2a0909
S
1082 if (data->soc != SOC_ARCH_EXYNOS4210)
1083 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
1084 writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
1085 data->base[i] + EXYNOS_TMU_REG_INTCLEAR);
1086 }
f22d9c03
ADK
1087 else
1088 writel(EXYNOS4210_TMU_INTCLEAR_VAL,
3c2a0909 1089 data->base[0] + EXYNOS_TMU_REG_INTCLEAR);
9d97e5c8 1090 mutex_unlock(&data->lock);
3c2a0909
S
1091 exynos_report_trigger();
1092 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
1093 enable_irq(data->irq[i]);
9d97e5c8
DK
1094}
1095
f22d9c03 1096static irqreturn_t exynos_tmu_irq(int irq, void *id)
9d97e5c8 1097{
f22d9c03 1098 struct exynos_tmu_data *data = id;
3c2a0909 1099 int i;
9d97e5c8 1100
3c2a0909
S
1101 pr_debug("[TMUIRQ] irq = %d\n", irq);
1102
1103 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
1104 disable_irq_nosync(data->irq[i]);
9d97e5c8
DK
1105 schedule_work(&data->irq_work);
1106
1107 return IRQ_HANDLED;
1108}
7e0b55e6
ADK
/* Sensor hooks handed to the Exynos thermal zone glue code. */
1109static struct thermal_sensor_conf exynos_sensor_conf = {
1110 .name = "exynos-therm",
1111 .read_temperature = (int (*)(void *))exynos_tmu_read,
bffd1f8a 1112 .write_emul_temp = exynos_tmu_set_emulation,
17be868e 1113};
3c2a0909
S
/* Same read callback re-exported to the IPA power-allocation governor. */
1114#if defined(CONFIG_CPU_THERMAL_IPA)
1115static struct ipa_sensor_conf ipa_sensor_conf = {
1116 .read_soc_temperature = (int (*)(void *))exynos_tmu_read,
1117};
1118#endif
1119static int exynos_pm_notifier(struct notifier_block *notifier,
1120 unsigned long pm_event, void *v)
1121{
1122 switch (pm_event) {
1123 case PM_SUSPEND_PREPARE:
1124 is_suspending = true;
1125 exynos_tmu_call_notifier(TMU_COLD, 0);
1126 exynos_gpu_call_notifier(TMU_COLD);
1127 break;
1128 case PM_POST_SUSPEND:
1129 is_suspending = false;
1130 break;
1131 }
17be868e 1132
3c2a0909
S
1133 return NOTIFY_OK;
1134}
1135
1136static struct notifier_block exynos_pm_nb = {
1137 .notifier_call = exynos_pm_notifier,
1138};
1139
1140#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
1141void exynos_tmu_core_control(bool on, int id)
1142{
1143 int i;
1144 unsigned int con;
1145 struct exynos_tmu_data *data;
1146
1147 if (exynos_tmu_pdev == NULL)
1148 return;
1149
1150 data = platform_get_drvdata(exynos_tmu_pdev);
1151
1152 if (!is_tmu_probed || data == NULL)
1153 return;
1154
1155 con = readl(data->base[id] + EXYNOS_TMU_REG_CONTROL);
1156 con &= TMU_CONTROL_ONOFF_MASK;
1157 con |= (on ? EXYNOS_TMU_CORE_ON : EXYNOS_TMU_CORE_OFF);
1158 writel(con, data->base[id] + EXYNOS_TMU_REG_CONTROL);
1159
1160 if (!on) {
1161 for (i = 0; i < IDLE_MAX_TIME; i++) {
1162 if (readl(data->base[id] + EXYNOS_TMU_REG_STATUS) & 0x1)
1163 break;
1164 }
1165 if (i == (IDLE_MAX_TIME - 1))
1166 pr_err("@@@@@ TMU CHECK BUSY @@@@@@\n");
1167 }
1168}
1169#endif
1170
1171#if (defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)) && defined(CONFIG_CPU_IDLE)
1172static void exynos_tmu_all_cores_control(bool on)
1173{
1174 int i, j;
1175 unsigned int con;
1176 unsigned int status;
1177 struct exynos_tmu_data *data;
1178
1179 if (exynos_tmu_pdev == NULL)
1180 return;
1181
1182 data = platform_get_drvdata(exynos_tmu_pdev);
1183
1184 if (data == NULL)
1185 return;
1186
1187 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
1188 con = readl(data->base[i] + EXYNOS_TMU_REG_CONTROL);
1189 con &= TMU_CONTROL_ONOFF_MASK;
1190 con |= (on ? EXYNOS_TMU_CORE_ON : EXYNOS_TMU_CORE_OFF);
1191 writel(con, data->base[i] + EXYNOS_TMU_REG_CONTROL);
1192 }
1193
1194 if (!on) {
1195 for (j=0; j < IDLE_MAX_TIME; j++) {
1196 status = 0;
1197
1198 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
1199 status |= (((readl(data->base[i] + EXYNOS_TMU_REG_STATUS) & 0x1)) << i);
1200
1201 if (status == 0x1F)
1202 break;
1203
1204 }
1205 if (j == (IDLE_MAX_TIME - 1))
1206 pr_err("@@@@@ TMU CHECK BUSY @@@@@@\n");
1207 }
1208}
1209
1210static int exynos_pm_dstop_notifier(struct notifier_block *notifier,
1211 unsigned long pm_event, void *v)
1212{
1213 switch (pm_event) {
1214 case LPA_ENTER:
1215 exynos_tmu_all_cores_control(false);
1216 break;
1217 case LPA_ENTER_FAIL:
1218 case LPA_EXIT:
1219 exynos_tmu_all_cores_control(true);
1220 break;
1221 }
1222
1223 return NOTIFY_OK;
1224}
1225
1226static struct notifier_block exynos_pm_dstop_nb = {
1227 .notifier_call = exynos_pm_dstop_notifier,
1228};
1229#endif
1230
1231#if defined(CONFIG_ARM_EXYNOS5430_BUS_DEVFREQ) || defined(CONFIG_ARM_EXYNOS5433_BUS_DEVFREQ)
1232static int __ref exynos_mif_thermal_cpu_hotplug(enum mif_thermal_state_t cur_state)
1233{
1234 int ret = 0;
1235
1236 if (is_mif_thermal_hotplugged_out) {
1237 if (cur_state < MIF_THROTTLING2) {
1238 /*
1239 * If current temperature is lower than low threshold,
1240 * call cluster1_cores_hotplug(false) for hotplugged out cpus.
1241 */
1242 ret = cluster1_cores_hotplug(false);
1243 if (ret)
1244 pr_err("%s: failed cluster1 cores hotplug in\n",
1245 __func__);
1246 else
1247 is_mif_thermal_hotplugged_out = false;
1248 }
1249 } else {
1250 if (cur_state >= MIF_THROTTLING2) {
1251 /*
1252 * If current temperature is higher than high threshold,
1253 * call cluster1_cores_hotplug(true) to hold temperature down.
1254 */
1255 ret = cluster1_cores_hotplug(true);
1256 if (ret)
1257 pr_err("%s: failed cluster1 cores hotplug out\n",
1258 __func__);
1259 else
1260 is_mif_thermal_hotplugged_out = true;
1261 }
1262 }
1263
1264 return ret;
1265}
1266
1267
1268static int exynos_mif_thermal_notifier(struct notifier_block *notifier,
1269 unsigned long event, void *v)
1270{
1271 int *ch = v;
1272 enum mif_thermal_state_t old_state;
1273
1274 if (*ch == 0)
1275 mif_thermal_level_ch0 = event;
1276 else
1277 mif_thermal_level_ch1 = event;
1278
1279 old_state = mif_thermal_state;
1280 if (mif_thermal_level_ch0 < MIF_THERMAL_THRESHOLD && mif_thermal_level_ch1 < MIF_THERMAL_THRESHOLD)
1281 mif_thermal_state = MIF_NORMAL;
1282 else if (mif_thermal_level_ch0 >= MIF_THERMAL_THRESHOLD && mif_thermal_level_ch1 >= MIF_THERMAL_THRESHOLD)
1283 mif_thermal_state = MIF_THROTTLING2;
1284 else
1285 mif_thermal_state = MIF_THROTTLING1;
1286
1287 if (old_state != mif_thermal_state) {
1288 switch (mif_thermal_state) {
1289 case MIF_NORMAL:
1290 pm_qos_update_request(&exynos_mif_thermal_cluster1_max_qos, PM_QOS_CPU_FREQ_DEFAULT_VALUE);
1291 pm_qos_update_request(&exynos_mif_thermal_cluster0_max_qos, PM_QOS_CPU_FREQ_DEFAULT_VALUE);
1292 break;
1293 case MIF_THROTTLING1:
1294 pm_qos_update_request(&exynos_mif_thermal_cluster1_max_qos, MIF_THROTTLING1_BIG);
1295 pm_qos_update_request(&exynos_mif_thermal_cluster0_max_qos, MIF_THROTTLING1_LITTLE);
1296 break;
1297 case MIF_THROTTLING2:
1298 case MIF_TRIPPING:
1299 pm_qos_update_request(&exynos_mif_thermal_cluster1_max_qos, MIF_THROTTLING2_BIG);
1300 pm_qos_update_request(&exynos_mif_thermal_cluster0_max_qos, MIF_THROTTLING2_LITTLE);
1301 break;
1302 }
1303 exynos_mif_thermal_cpu_hotplug(mif_thermal_state);
1304 pr_info("mif MR4 thermal state %d to %d\n", old_state, mif_thermal_state);
1305 }
1306
1307 return NOTIFY_OK;
1308}
1309
1310static struct notifier_block exynos_mif_thermal_nb = {
1311 .notifier_call = exynos_mif_thermal_notifier,
1312};
1313#endif
1314
1315#if defined(CONFIG_CPU_EXYNOS4210)
/*
 * Exynos4210 defaults: three rising trigger levels relative to
 * .threshold, one-point trimming, two passive cpufreq clip entries.
 */
 1316static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
 1317 .threshold = 80,
 1318 .trigger_levels[0] = 5,
 1319 .trigger_levels[1] = 20,
 1320 .trigger_levels[2] = 30,
 1321 .trigger_level0_en = 1,
 1322 .trigger_level1_en = 1,
 1323 .trigger_level2_en = 1,
 1324 .trigger_level3_en = 0,
 1325 .trigger_level4_en = 0,
 1326 .trigger_level5_en = 0,
 1327 .trigger_level6_en = 0,
 1328 .trigger_level7_en = 0,
 1329 .gain = 15,
 1330 .reference_voltage = 7,
 1331 .cal_type = TYPE_ONE_POINT_TRIMMING,
 1332 .freq_tab[0] = {
 1333 .freq_clip_max = 800 * 1000,
 1334 .temp_level = 85,
 1335 },
17be868e
ADK
 1336 .freq_tab[1] = {
 1337 .freq_clip_max = 200 * 1000,
 1338 .temp_level = 100,
 1339 },
 1340 .freq_tab_count = 2,
 1341 .type = SOC_ARCH_EXYNOS4210,
 1342};
1343#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
1344#else
1345#define EXYNOS4210_TMU_DRV_DATA (NULL)
1346#endif
1347
1348#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
/*
 * Exynos5250/4412 defaults: absolute trigger temperatures (degrees C),
 * efuse fallback for missing trim data, two passive clip entries.
 */
 1349static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
4f0a6847 1350 .threshold_falling = 10,
17be868e
ADK
 1351 .trigger_levels[0] = 85,
 1352 .trigger_levels[1] = 103,
 1353 .trigger_levels[2] = 110,
 1354 .trigger_level0_en = 1,
 1355 .trigger_level1_en = 1,
 1356 .trigger_level2_en = 1,
 1357 .trigger_level3_en = 0,
3c2a0909
S
 1358 .trigger_level4_en = 0,
 1359 .trigger_level5_en = 0,
 1360 .trigger_level6_en = 0,
 1361 .trigger_level7_en = 0,
17be868e
ADK
 1362 .gain = 8,
 1363 .reference_voltage = 16,
 1364 .noise_cancel_mode = 4,
 1365 .cal_type = TYPE_ONE_POINT_TRIMMING,
 1366 .efuse_value = 55,
 1367 .freq_tab[0] = {
 1368 .freq_clip_max = 800 * 1000,
 1369 .temp_level = 85,
 1370 },
 1371 .freq_tab[1] = {
 1372 .freq_clip_max = 200 * 1000,
 1373 .temp_level = 103,
 1374 },
 1375 .freq_tab_count = 2,
 1376 .type = SOC_ARCH_EXYNOS,
 1377};
1378#define EXYNOS_TMU_DRV_DATA (&exynos_default_tmu_data)
1379#else
1380#define EXYNOS_TMU_DRV_DATA (NULL)
1381#endif
1382
3c2a0909
S
/*
 * Exynos5430 platform data: eight trigger levels (all enabled), six
 * cpufreq clip stages.  Per-stage caps differ between the 5430_L and
 * regular parts, and cluster0 caps/masks only exist with MP cpufreq.
 */
 1383static struct exynos_tmu_platform_data exynos5430_tmu_data = {
 1384 .threshold_falling = 2,
 1385 .trigger_levels[0] = 70,
 1386 .trigger_levels[1] = 85,
 1387 .trigger_levels[2] = 90,
 1388 .trigger_levels[3] = 95,
 1389 .trigger_levels[4] = 100,
 1390 .trigger_levels[5] = 105,
 1391 .trigger_levels[6] = 105,
 1392 .trigger_levels[7] = 115,
 1393 .trigger_level0_en = 1,
 1394 .trigger_level1_en = 1,
 1395 .trigger_level2_en = 1,
 1396 .trigger_level3_en = 1,
 1397 .trigger_level4_en = 1,
 1398 .trigger_level5_en = 1,
 1399 .trigger_level6_en = 1,
 1400 .trigger_level7_en = 1,
 1401 .gain = 8,
 1402 .reference_voltage = 16,
 1403 .noise_cancel_mode = 4,
 1404 .cal_type = TYPE_ONE_POINT_TRIMMING,
 1405 .efuse_value = 75,
 1406 .freq_tab[0] = {
 1407#ifdef CONFIG_SOC_EXYNOS5430_L
 1408 .freq_clip_max = 1800 * 1000,
 1409#else
 1410 .freq_clip_max = 1900 * 1000,
 1411#endif
 1412#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1413#ifdef CONFIG_SOC_EXYNOS5430_L
 1414 .freq_clip_max_cluster0 = 1300 * 1000,
 1415#else
 1416 .freq_clip_max_cluster0 = 1500 * 1000,
 1417#endif
 1418#endif
 1419 .temp_level = 70,
 1420#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1421 .mask_val = &mp_cluster_cpus[CL_ONE],
 1422 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1423#endif
 1424 },
 1425 .freq_tab[1] = {
 1426 .freq_clip_max = 1800 * 1000,
 1427#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1428#ifdef CONFIG_SOC_EXYNOS5430_L
 1429 .freq_clip_max_cluster0 = 1300 * 1000,
 1430#else
 1431 .freq_clip_max_cluster0 = 1500 * 1000,
 1432#endif
 1433#endif
 1434 .temp_level = 85,
 1435#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1436 .mask_val = &mp_cluster_cpus[CL_ONE],
 1437 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1438#endif
 1439 },
 1440 .freq_tab[2] = {
 1441 .freq_clip_max = 1500 * 1000,
 1442#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1443#ifdef CONFIG_SOC_EXYNOS5430_L
 1444 .freq_clip_max_cluster0 = 1300 * 1000,
 1445#else
 1446 .freq_clip_max_cluster0 = 1500 * 1000,
 1447#endif
 1448#endif
 1449 .temp_level = 90,
 1450#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1451 .mask_val = &mp_cluster_cpus[CL_ONE],
 1452 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1453#endif
 1454 },
 1455 .freq_tab[3] = {
 1456 .freq_clip_max = 1300 * 1000,
 1457#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1458#ifdef CONFIG_SOC_EXYNOS5430_L
 1459 .freq_clip_max_cluster0 = 1300 * 1000,
 1460#else
 1461 .freq_clip_max_cluster0 = 1500 * 1000,
 1462#endif
 1463#endif
 1464 .temp_level = 95,
 1465#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1466 .mask_val = &mp_cluster_cpus[CL_ONE],
 1467 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1468#endif
 1469 },
 1470 .freq_tab[4] = {
 1471 .freq_clip_max = 900 * 1000,
 1472#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1473 .freq_clip_max_cluster0 = 1200 * 1000,
 1474#endif
 1475 .temp_level = 100,
 1476#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1477 .mask_val = &mp_cluster_cpus[CL_ONE],
 1478 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1479#endif
 1480 },
 1481 .freq_tab[5] = {
 1482 .freq_clip_max = 900 * 1000,
 1483#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1484 .freq_clip_max_cluster0 = 500 * 1000,
 1485#endif
 1486 .temp_level = 105,
 1487#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1488 .mask_val = &mp_cluster_cpus[CL_ONE],
 1489 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1490#endif
 1491 },
 1492 .size[THERMAL_TRIP_ACTIVE] = 1,
 1493 .size[THERMAL_TRIP_PASSIVE] = 5,
 1494 .freq_tab_count = 6,
 1495 .type = SOC_ARCH_EXYNOS543X,
 1496};
1497#define EXYNOS5430_TMU_DRV_DATA (&exynos5430_tmu_data)
1498
1499#if defined(CONFIG_SOC_EXYNOS5422)
/*
 * Exynos5422 platform data: four trigger levels, five cpufreq clip
 * stages; cluster0 caps/masks only exist with MP cpufreq.
 */
 1500static struct exynos_tmu_platform_data exynos5_tmu_data = {
 1501 .threshold_falling = 2,
 1502 .trigger_levels[0] = 80,
 1503 .trigger_levels[1] = 90,
 1504 .trigger_levels[2] = 100,
 1505 .trigger_levels[3] = 115,
 1506 .trigger_level0_en = 1,
 1507 .trigger_level1_en = 1,
 1508 .trigger_level2_en = 1,
 1509 .trigger_level3_en = 1,
 1510 .trigger_level4_en = 0,
 1511 .trigger_level5_en = 0,
 1512 .trigger_level6_en = 0,
 1513 .trigger_level7_en = 0,
 1514 .gain = 8,
 1515 .reference_voltage = 16,
 1516 .noise_cancel_mode = 4,
 1517 .cal_type = TYPE_ONE_POINT_TRIMMING,
 1518 .efuse_value = 55,
 1519 .freq_tab[0] = {
 1520 .freq_clip_max = 1700 * 1000,
 1521#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1522 .freq_clip_max_cluster0 = 1300 * 1000,
 1523#endif
 1524 .temp_level = 80,
 1525#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1526 .mask_val = &mp_cluster_cpus[CL_ONE],
 1527 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1528#endif
 1529 },
 1530 .freq_tab[1] = {
 1531 .freq_clip_max = 1500 * 1000,
 1532#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1533 .freq_clip_max_cluster0 = 1300 * 1000,
 1534#endif
 1535 .temp_level = 90,
 1536#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1537 .mask_val = &mp_cluster_cpus[CL_ONE],
 1538 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1539#endif
 1540 },
 1541 .freq_tab[2] = {
 1542 .freq_clip_max = 900 * 1000,
 1543#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1544 .freq_clip_max_cluster0 = 1300 * 1000,
 1545#endif
 1546 .temp_level = 95,
 1547#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1548 .mask_val = &mp_cluster_cpus[CL_ONE],
 1549 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1550#endif
 1551 },
 1552 .freq_tab[3] = {
 1553 .freq_clip_max = 800 * 1000,
 1554#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1555 .freq_clip_max_cluster0 = 1200 * 1000,
 1556#endif
 1557 .temp_level = 100,
 1558#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1559 .mask_val = &mp_cluster_cpus[CL_ONE],
 1560 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1561#endif
 1562 },
 1563 .freq_tab[4] = {
 1564 .freq_clip_max = 800 * 1000,
 1565#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1566 .freq_clip_max_cluster0 = 800 * 1000,
 1567#endif
 1568 .temp_level = 110,
 1569#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1570 .mask_val = &mp_cluster_cpus[CL_ONE],
 1571 .mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO],
 1572#endif
 1573 },
 1574 .size[THERMAL_TRIP_ACTIVE] = 1,
 1575 .size[THERMAL_TRIP_PASSIVE] = 4,
 1576 .freq_tab_count = 5,
 1577 .type = SOC_ARCH_EXYNOS,
 1578};
1579#define EXYNOS5422_TMU_DRV_DATA (&exynos5_tmu_data)
1580#else
1581#define EXYNOS5422_TMU_DRV_DATA (NULL)
1582#endif
1583
1584#if defined(CONFIG_SOC_EXYNOS5433)
/* Exynos5433: only the SoC type is static; the rest comes from DT
 * via exynos_tmu_parse_dt() at probe time. */
 1585static struct exynos_tmu_platform_data exynos5433_tmu_data = {
 1586 .type = SOC_ARCH_EXYNOS543X,
 1587};
1588#define EXYNOS5433_TMU_DRV_DATA (&exynos5433_tmu_data)
1589#else
1590#define EXYNOS5433_TMU_DRV_DATA (NULL)
1591#endif
1592
17be868e
ADK
1593#ifdef CONFIG_OF
/* DT match table: each compatible maps to its per-SoC platform data
 * (NULL when that SoC's support is compiled out). */
 1594static const struct of_device_id exynos_tmu_match[] = {
 1595 {
 1596 .compatible = "samsung,exynos4210-tmu",
 1597 .data = (void *)EXYNOS4210_TMU_DRV_DATA,
 1598 },
b6cee53c
SK
 1599 {
 1600 .compatible = "samsung,exynos4412-tmu",
 1601 .data = (void *)EXYNOS_TMU_DRV_DATA,
 1602 },
17be868e
ADK
 1603 {
 1604 .compatible = "samsung,exynos5250-tmu",
 1605 .data = (void *)EXYNOS_TMU_DRV_DATA,
 1606 },
3c2a0909
S
 1607 {
 1608 .compatible = "samsung,exynos5430-tmu",
 1609 .data = (void *)EXYNOS5430_TMU_DRV_DATA,
 1610 },
 1611 {
 1612 .compatible = "samsung,exynos5422-tmu",
 1613 .data = (void *)EXYNOS5422_TMU_DRV_DATA,
 1614 },
 1615 {
 1616 .compatible = "samsung,exynos5433-tmu",
 1617 .data = (void *)EXYNOS5433_TMU_DRV_DATA,
 1618 },
17be868e
ADK
 1619 {},
 1620};
 1621MODULE_DEVICE_TABLE(of, exynos_tmu_match);
17be868e
ADK
1622#endif
1623
/* Non-DT (board-file) probe table mirroring exynos_tmu_match. */
 1624static struct platform_device_id exynos_tmu_driver_ids[] = {
 1625 {
 1626 .name = "exynos4210-tmu",
 1627 .driver_data = (kernel_ulong_t)EXYNOS4210_TMU_DRV_DATA,
 1628 },
 1629 {
 1630 .name = "exynos5250-tmu",
 1631 .driver_data = (kernel_ulong_t)EXYNOS_TMU_DRV_DATA,
 1632 },
3c2a0909
S
 1633 {
 1634 .name = "exynos5430-tmu",
 1635 .driver_data = (kernel_ulong_t)EXYNOS5430_TMU_DRV_DATA,
 1636 },
 1637 {
 1638 .name = "exynos5422-tmu",
 1639 .driver_data = (kernel_ulong_t)EXYNOS5422_TMU_DRV_DATA,
 1640 },
 1641 {
 1642 .name = "exynos5433-tmu",
 1643 .driver_data = (kernel_ulong_t)EXYNOS5433_TMU_DRV_DATA,
 1644 },
17be868e
ADK
 1645 { },
 1646};
3ae53b1e 1647MODULE_DEVICE_TABLE(platform, exynos_tmu_driver_ids);
17be868e
ADK
1648
1649static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
1650 struct platform_device *pdev)
1651{
1652#ifdef CONFIG_OF
1653 if (pdev->dev.of_node) {
1654 const struct of_device_id *match;
3c2a0909
S
1655#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
1656 init_mp_cpumask_set();
1657#endif
17be868e
ADK
1658 match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
1659 if (!match)
1660 return NULL;
1661 return (struct exynos_tmu_platform_data *) match->data;
1662 }
1663#endif
1664 return (struct exynos_tmu_platform_data *)
1665 platform_get_device_id(pdev)->driver_data;
7e0b55e6 1666}
bbf63be4 1667
3c2a0909
S
1668/* sysfs interface : /sys/devices/platform/exynos5-tmu/temp */
1669static ssize_t
1670exynos_thermal_sensor_temp(struct device *dev,
1671 struct device_attribute *attr, char *buf)
1672{
1673 struct exynos_tmu_data *data = th_zone->sensor_conf->private_data;
1674 unsigned long temp[EXYNOS_TMU_COUNT] = {0,};
1675 int i, len = 0;
1676
1677 mutex_lock(&data->lock);
1678
1679 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
1680 temp[i] = cal_tmu_read(data->cal_data, i) * MCELSIUS;
1681
1682 mutex_unlock(&data->lock);
1683
1684 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
1685 len += snprintf(&buf[len], PAGE_SIZE, "sensor%d : %ld\n", i, temp[i]);
1686
1687 return len;
1688}
1689
1690static DEVICE_ATTR(temp, S_IRUSR | S_IRGRP, exynos_thermal_sensor_temp, NULL);
1691
1692/* sysfs interface : /sys/devices/platform/exynos5-tmu/curr_temp */
1693static ssize_t
1694exynos_thermal_curr_temp(struct device *dev,
1695 struct device_attribute *attr, char *buf)
1696{
1697 struct exynos_tmu_data *data = th_zone->sensor_conf->private_data;
1698 unsigned long temp[EXYNOS_TMU_COUNT];
1699 int i, len = 0;
1700
1701 if (!(soc_is_exynos5422()))
1702 return -EPERM;
1703
1704 if (EXYNOS_TMU_COUNT < 4)
1705 return -EPERM;
1706
1707 mutex_lock(&data->lock);
1708
1709 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
1710 temp[i] = cal_tmu_read(data->cal_data, i) * 10;
1711 }
1712
1713 mutex_unlock(&data->lock);
1714
1715 /* rearrange temperature with core order
1716 sensor0 -> 3 -> 2 -> 1 */
1717 len += snprintf(&buf[len], PAGE_SIZE, "%ld,", temp[0]);
1718 len += snprintf(&buf[len], PAGE_SIZE, "%ld,", temp[3]);
1719 len += snprintf(&buf[len], PAGE_SIZE, "%ld,", temp[2]);
1720 len += snprintf(&buf[len], PAGE_SIZE, "%ld\n", temp[1]);
1721
1722 return len;
1723}
1724
1725static DEVICE_ATTR(curr_temp, S_IRUGO, exynos_thermal_curr_temp, NULL);
1726
/* sysfs attributes exported on the TMU platform device (temp, curr_temp). */
 1727static struct attribute *exynos_thermal_sensor_attributes[] = {
 1728 &dev_attr_temp.attr,
 1729 &dev_attr_curr_temp.attr,
 1730 NULL
 1731};
 1732
 1733static const struct attribute_group exynos_thermal_sensor_attr_group = {
 1734 .attrs = exynos_thermal_sensor_attributes,
 1735};
1736
1737static void exynos_set_cal_data(struct exynos_tmu_data *data)
1738{
1739 int i;
1740
1741 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
1742 data->cal_data->base[i] = data->base[i];
1743
1744 data->cal_data->gain = data->pdata->gain;
1745 data->cal_data->reference_voltage = data->pdata->reference_voltage;
1746 data->cal_data->noise_cancel_mode = data->pdata->noise_cancel_mode;
1747 data->cal_data->cal_type = data->pdata->cal_type;
1748
1749 data->cal_data->trigger_level_en[0] = data->pdata->trigger_level0_en;
1750 data->cal_data->trigger_level_en[1] = data->pdata->trigger_level1_en;
1751 data->cal_data->trigger_level_en[2] = data->pdata->trigger_level2_en;
1752 data->cal_data->trigger_level_en[3] = data->pdata->trigger_level3_en;
1753 data->cal_data->trigger_level_en[4] = data->pdata->trigger_level4_en;
1754 data->cal_data->trigger_level_en[5] = data->pdata->trigger_level5_en;
1755 data->cal_data->trigger_level_en[6] = data->pdata->trigger_level6_en;
1756 data->cal_data->trigger_level_en[7] = data->pdata->trigger_level7_en;
1757}
1758
1759static void exynos_tmu_regdump(struct platform_device *pdev, int id)
1760{
1761 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
1762 unsigned int reg_data;
1763
1764 mutex_lock(&data->lock);
1765
1766 reg_data = readl(data->base[id] + EXYNOS_TMU_REG_TRIMINFO);
1767 pr_info("TRIMINFO[%d] = 0x%x\n", id, reg_data);
1768 reg_data = readl(data->base[id] + EXYNOS_TMU_REG_CONTROL);
1769 pr_info("TMU_CONTROL[%d] = 0x%x\n", id, reg_data);
1770 reg_data = readl(data->base[id] + EXYNOS_TMU_REG_CURRENT_TEMP);
1771 pr_info("CURRENT_TEMP[%d] = 0x%x\n", id, reg_data);
1772#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
1773 reg_data = readl(data->base[id] + EXYNOS_THD_TEMP_RISE3_0);
1774 pr_info("THRESHOLD_TEMP_RISE3_0[%d] = 0x%x\n", id, reg_data);
1775 reg_data = readl(data->base[id] + EXYNOS_THD_TEMP_RISE7_4);
1776 pr_info("THRESHOLD_TEMP_RISE7_4[%d] = 0x%x\n", id, reg_data);
1777 reg_data = readl(data->base[id] + EXYNOS_THD_TEMP_FALL3_0);
1778 pr_info("THRESHOLD_TEMP_FALL3_0[%d] = 0x%x\n", id, reg_data);
1779 reg_data = readl(data->base[id] + EXYNOS_THD_TEMP_FALL7_4);
1780 pr_info("THRESHOLD_TEMP_FALL7_4[%d] = 0x%x\n", id, reg_data);
1781#else
1782 reg_data = readl(data->base[id] + EXYNOS_THD_TEMP_RISE);
1783 pr_info("THRESHOLD_TEMP_RISE[%d] = 0x%x\n", id, reg_data);
1784 reg_data = readl(data->base[id] + EXYNOS_THD_TEMP_FALL);
1785 pr_info("THRESHOLD_TEMP_FALL[%d] = 0x%x\n", id, reg_data);
1786#endif
1787 reg_data = readl(data->base[id] + EXYNOS_TMU_REG_INTEN);
1788 pr_info("INTEN[%d] = 0x%x\n", id, reg_data);
1789 reg_data = readl(data->base[id] + EXYNOS_TMU_REG_INTCLEAR);
1790 pr_info("INTCLEAR[%d] = 0x%x\n", id, reg_data);
1791
1792 mutex_unlock(&data->lock);
1793}
1794
1795#if defined(CONFIG_SOC_EXYNOS5433)
1796static int parse_trigger_data(struct device_node *np, struct exynos_tmu_platform_data *pdata, int i)
1797{
1798 int ret = 0;
1799 u32 enable, temp;
1800 struct device_node *np_trigger;
1801 char node_name[16];
1802
1803 snprintf(node_name, sizeof(node_name), "trigger_level_%d", i);
1804
1805 np_trigger = of_find_node_by_name(np, node_name);
1806 if (!np_trigger)
1807 return -EINVAL;
1808
1809 of_property_read_u32(np_trigger, "temp", &temp);
1810 of_property_read_u32(np_trigger, "enable", &enable);
1811
1812 pdata->trigger_levels[i] = temp;
1813 switch (i) {
1814 case 0:
1815 pdata->trigger_level0_en = (enable == 0) ? 0 : 1;
1816 break;
1817 case 1:
1818 pdata->trigger_level1_en = (enable == 0) ? 0 : 1;
1819 break;
1820 case 2:
1821 pdata->trigger_level2_en = (enable == 0) ? 0 : 1;
1822 break;
1823 case 3:
1824 pdata->trigger_level3_en = (enable == 0) ? 0 : 1;
1825 break;
1826 case 4:
1827 pdata->trigger_level4_en = (enable == 0) ? 0 : 1;
1828 break;
1829 case 5:
1830 pdata->trigger_level5_en = (enable == 0) ? 0 : 1;
1831 break;
1832 case 6:
1833 pdata->trigger_level6_en = (enable == 0) ? 0 : 1;
1834 break;
1835 case 7:
1836 pdata->trigger_level7_en = (enable == 0) ? 0 : 1;
1837 break;
1838 }
1839
1840 return ret;
1841}
1842
1843static int parse_throttle_data(struct device_node *np, struct exynos_tmu_platform_data *pdata, int i)
1844{
1845 int ret = 0;
1846 struct device_node *np_throttle;
1847 char node_name[15];
1848
1849 snprintf(node_name, sizeof(node_name), "throttle_tab_%d", i);
1850
1851 np_throttle = of_find_node_by_name(np, node_name);
1852 if (!np_throttle)
1853 return -EINVAL;
1854
1855 of_property_read_u32(np_throttle, "temp", &pdata->freq_tab[i].temp_level);
1856 of_property_read_u32(np_throttle, "freq_clip_max", &pdata->freq_tab[i].freq_clip_max);
1857
1858#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
1859 of_property_read_u32(np_throttle, "freq_clip_max_cluster0", &pdata->freq_tab[i].freq_clip_max_cluster0);
1860 pdata->freq_tab[i].mask_val = &mp_cluster_cpus[CL_ONE];
1861 pdata->freq_tab[i].mask_val_cluster0 = &mp_cluster_cpus[CL_ZERO];
1862#endif
1863
1864 return ret;
1865}
1866
1867
1868static int exynos_tmu_parse_dt(struct device_node *np, struct exynos_tmu_platform_data *pdata)
1869{
1870 u32 value, cal_type, trigger_level_count;
1871 int ret = 0, i;
1872
1873 if (!np)
1874 return -EINVAL;
1875
1876 of_property_read_u32(np, "threshold_falling", &value);
1877 pdata->threshold_falling = value;
1878 of_property_read_u32(np, "gain", &value);
1879 pdata->gain = value;
1880 of_property_read_u32(np, "reference_voltage", &value);
1881 pdata->reference_voltage = value;
1882 of_property_read_u32(np, "noise_cancel_mode", &value);
1883 pdata->noise_cancel_mode = value;
1884 of_property_read_u32(np, "cal_type", &cal_type);
1885 of_property_read_u32(np, "efuse_value", &pdata->efuse_value);
1886 of_property_read_u32(np, "trigger_level_count", &trigger_level_count);
1887 of_property_read_u32(np, "throttle_count", &pdata->freq_tab_count);
1888 of_property_read_u32(np, "throttle_active_count", &pdata->size[THERMAL_TRIP_ACTIVE]);
1889 of_property_read_u32(np, "throttle_passive_count", &pdata->size[THERMAL_TRIP_PASSIVE]);
1890 of_property_read_u32(np, "hotplug_out_threshold", &pdata->hotplug_out_threshold);
1891 of_property_read_u32(np, "hotplug_in_threshold", &pdata->hotplug_in_threshold);
1892
1893 for (i = 0; i < trigger_level_count; i++) {
1894 ret = parse_trigger_data(np, pdata, i);
1895 if (ret) {
1896 pr_err("Failed to load trigger data(%d)\n", i);
1897 return -EINVAL;
1898 }
1899 }
1900
1901 for (i = 0; i < pdata->freq_tab_count; i++) {
1902 ret = parse_throttle_data(np, pdata, i);
1903 if (ret) {
1904 pr_err("Failed to load throttle data(%d)\n", i);
1905 return -EINVAL;
1906 }
1907 }
1908
1909 if (cal_type == 1)
1910 pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
1911 else if (cal_type == 2)
1912 pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
1913 else
1914 pdata->cal_type = TYPE_NONE;
1915
1916 return ret;
1917}
1918#endif
1919
1920#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
/*
 * exynos5_tmu_cpufreq_notifier() - finish TMU registration once cpufreq
 * is up (the cooling device needs the cpufreq tables).
 *
 * On CPUFREQ_INIT_COMPLETE, registers the thermal zone; on failure it
 * unwinds everything probe set up earlier (sysfs group, PM notifiers,
 * this notifier itself, driver data, IRQs) and frees tmudata.  On
 * success it additionally wires up the IPA sensor and the MIF thermal
 * notifier plus its cluster max-frequency QoS requests.
 */
 1921static int exynos5_tmu_cpufreq_notifier(struct notifier_block *notifier, unsigned long event, void *v)
 1922{
 1923 int ret = 0, i;
 1924
 1925 switch (event) {
 1926 case CPUFREQ_INIT_COMPLETE:
 1927 ret = exynos_register_thermal(&exynos_sensor_conf);
 1928 is_tmu_probed = true;
 1929
 1930 if (ret) {
/* Registration failed: undo the probe-time setup in reverse order. */
 1931 dev_err(&exynos_tmu_pdev->dev, "Failed to register thermal interface\n");
 1932 sysfs_remove_group(&exynos_tmu_pdev->dev.kobj, &exynos_thermal_sensor_attr_group);
 1933 unregister_pm_notifier(&exynos_pm_nb);
 1934#if (defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)) && defined(CONFIG_CPU_IDLE)
 1935 exynos_pm_unregister_notifier(&exynos_pm_dstop_nb);
 1936#endif
 1937#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
 1938 exynos_cpufreq_init_unregister_notifier(&exynos_cpufreq_nb);
 1939#endif
 1940 platform_set_drvdata(exynos_tmu_pdev, NULL);
 1941 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
 1942 if (tmudata->irq[i])
 1943 free_irq(tmudata->irq[i], tmudata);
 1944 }
 1945 kfree(tmudata);
 1946
 1947 return ret;
 1948 }
 1949#if defined(CONFIG_CPU_THERMAL_IPA)
 1950 ipa_sensor_conf.private_data = exynos_sensor_conf.private_data;
 1951 ipa_register_thermal_sensor(&ipa_sensor_conf);
 1952#endif
 1953#if defined(CONFIG_ARM_EXYNOS5430_BUS_DEVFREQ) || defined(CONFIG_ARM_EXYNOS5433_BUS_DEVFREQ)
 1954 exynos5_mif_thermal_add_notifier(&exynos_mif_thermal_nb);
 1955 pm_qos_add_request(&exynos_mif_thermal_cluster1_max_qos, PM_QOS_CLUSTER1_FREQ_MAX, PM_QOS_CPU_FREQ_DEFAULT_VALUE);
 1956 pm_qos_add_request(&exynos_mif_thermal_cluster0_max_qos, PM_QOS_CLUSTER0_FREQ_MAX, PM_QOS_CPU_FREQ_DEFAULT_VALUE);
 1957 is_mif_thermal_hotplugged_out = false;
 1958#endif
 1959 break;
 1960 }
 1961 return 0;
 1962}
1963#endif
1964
4eab7a9e 1965static int exynos_tmu_probe(struct platform_device *pdev)
9d97e5c8 1966{
f22d9c03
ADK
1967 struct exynos_tmu_data *data;
1968 struct exynos_tmu_platform_data *pdata = pdev->dev.platform_data;
7e0b55e6 1969 int ret, i;
3c2a0909
S
1970#if defined(CONFIG_SOC_EXYNOS5430)
1971 unsigned int spd_option_flag, spd_sel;
1972#endif
1973
1974 exynos_tmu_pdev = pdev;
1975 is_suspending = false;
9d97e5c8 1976
17be868e
ADK
1977 if (!pdata)
1978 pdata = exynos_get_driver_data(pdev);
1979
9d97e5c8
DK
1980 if (!pdata) {
1981 dev_err(&pdev->dev, "No platform init data supplied.\n");
1982 return -ENODEV;
1983 }
3c2a0909
S
1984
1985#if defined(CONFIG_SOC_EXYNOS5433)
1986 ret = exynos_tmu_parse_dt(pdev->dev.of_node, pdata);
1987 if (ret) {
1988 dev_err(&pdev->dev, "Failed to load platform data from device tree.\n");
1989 return -ENODEV;
1990 }
1991#else
1992 pdata->hotplug_in_threshold = CPU_HOTPLUG_IN_TEMP;
1993 pdata->hotplug_out_threshold = CPU_HOTPLUG_OUT_TEMP;
1994#endif
1995
1996#if defined(CONFIG_SOC_EXYNOS5430)
1997 exynos5430_get_egl_speed_option(&spd_option_flag, &spd_sel);
1998 if (spd_option_flag == EGL_DISABLE_SPD_OPTION)
1999 pdata->freq_tab[0].freq_clip_max = 1200 * 1000;
2000#endif
2001#if defined(CONFIG_SOC_EXYNOS5433)
2002 if (exynos_get_table_ver() == 0) {
2003 pdata->freq_tab[0].freq_clip_max = 1100 * 1000;
2004 pdata->freq_tab[1].freq_clip_max = 1000 * 1000;
2005 pdata->freq_tab[2].freq_clip_max = 900 * 1000;
2006 }
2007#endif
2008
79e093c3
ADK
2009 data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
2010 GFP_KERNEL);
9d97e5c8
DK
2011 if (!data) {
2012 dev_err(&pdev->dev, "Failed to allocate driver structure\n");
2013 return -ENOMEM;
2014 }
2015
3c2a0909
S
2016 data->cal_data = devm_kzalloc(&pdev->dev, sizeof(struct cal_tmu_data),
2017 GFP_KERNEL);
2018 if (!data->cal_data) {
2019 dev_err(&pdev->dev, "Failed to allocate cal data structure\n");
2020 return -ENOMEM;
9d97e5c8
DK
2021 }
2022
3c2a0909
S
2023#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
2024 exynos_cpufreq_init_register_notifier(&exynos_cpufreq_nb);
2025#endif
2026
f22d9c03 2027 INIT_WORK(&data->irq_work, exynos_tmu_work);
9d97e5c8 2028
3c2a0909
S
2029 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
2030 data->irq[i] = platform_get_irq(pdev, i);
2031 if (data->irq[i] < 0) {
2032 ret = data->irq[i];
2033 dev_err(&pdev->dev, "Failed to get platform irq\n");
2034 goto err_get_irq;
2035 }
9d97e5c8 2036
3c2a0909
S
2037 ret = request_irq(data->irq[i], exynos_tmu_irq,
2038 IRQF_TRIGGER_RISING, "exynos_tmu", data);
2039 if (ret) {
2040 dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq[i]);
2041 goto err_request_irq;
2042 }
9d97e5c8 2043
3c2a0909
S
2044 data->mem[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
2045 if (!data->mem[i]) {
2046 ret = -ENOENT;
2047 dev_err(&pdev->dev, "Failed to get platform resource\n");
2048 goto err_get_resource;
2049 }
9d97e5c8 2050
3c2a0909
S
2051 data->base[i] = devm_request_and_ioremap(&pdev->dev, data->mem[i]);
2052 if (IS_ERR(data->base[i])) {
2053 ret = PTR_ERR(data->base[i]);
2054 dev_err(&pdev->dev, "Failed to ioremap memory\n");
2055 goto err_io_remap;
2056 }
2057 }
2a16279c 2058
3c2a0909
S
2059 if (pdata->type == SOC_ARCH_EXYNOS || pdata->type == SOC_ARCH_EXYNOS4210 ||
2060 pdata->type == SOC_ARCH_EXYNOS543X)
f22d9c03
ADK
2061 data->soc = pdata->type;
2062 else {
2063 ret = -EINVAL;
2064 dev_err(&pdev->dev, "Platform not supported\n");
3c2a0909 2065 goto err_soc_type;
f22d9c03
ADK
2066 }
2067
9d97e5c8 2068 data->pdata = pdata;
3c2a0909 2069 tmudata = data;
9d97e5c8
DK
2070 platform_set_drvdata(pdev, data);
2071 mutex_init(&data->lock);
2072
3c2a0909
S
2073 exynos_set_cal_data(data);
2074
2075#if defined(CONFIG_SOC_EXYNOS5433)
2076 ret = cal_tmu_otp_read(data->cal_data);
2077
2078 /* Save the eFuse value before initializing TMU */
9d97e5c8 2079 if (ret) {
3c2a0909
S
2080 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
2081 exynos_tmu_get_efuse(pdev, i);
9d97e5c8 2082 }
3c2a0909
S
2083#else
2084 /* Save the eFuse value before initializing TMU */
2085 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
2086 exynos_tmu_get_efuse(pdev, i);
2087#endif
2088
2089 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
2090 ret = exynos_tmu_initialize(pdev, i);
2091 if (ret) {
2092 dev_err(&pdev->dev, "Failed to initialize TMU\n");
2093 goto err_tmu;
2094 }
2095
2096 exynos_tmu_control(pdev, i, true);
2097 exynos_tmu_regdump(pdev, i);
2098 }
2099
2100 mutex_lock(&data->lock);
2101 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
2102 int temp = cal_tmu_read(data->cal_data, i);
2103 pr_debug("[TMU]temp[%d] : %d\n", i, temp);
2104 }
2105 mutex_unlock(&data->lock);
9d97e5c8 2106
9d97e5c8 2107
7e0b55e6
ADK
2108 /* Register the sensor with thermal management interface */
2109 (&exynos_sensor_conf)->private_data = data;
3c2a0909 2110 exynos_sensor_conf.trip_data.trip_count = pdata->freq_tab_count;
7e0b55e6 2111
3c2a0909 2112 for (i = 0; i < pdata->freq_tab_count; i++) {
7e0b55e6 2113 exynos_sensor_conf.trip_data.trip_val[i] =
3c2a0909
S
2114 pdata->threshold + pdata->freq_tab[i].temp_level;
2115 }
7e0b55e6 2116
4f0a6847
JL
2117 exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling;
2118
7e0b55e6
ADK
2119 exynos_sensor_conf.cooling_data.freq_clip_count =
2120 pdata->freq_tab_count;
2121 for (i = 0; i < pdata->freq_tab_count; i++) {
2122 exynos_sensor_conf.cooling_data.freq_data[i].freq_clip_max =
2123 pdata->freq_tab[i].freq_clip_max;
3c2a0909
S
2124#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
2125 exynos_sensor_conf.cooling_data.freq_data[i].freq_clip_max_cluster0 =
2126 pdata->freq_tab[i].freq_clip_max_cluster0;
2127#endif
7e0b55e6
ADK
2128 exynos_sensor_conf.cooling_data.freq_data[i].temp_level =
2129 pdata->freq_tab[i].temp_level;
3c2a0909
S
2130 if (pdata->freq_tab[i].mask_val) {
2131 exynos_sensor_conf.cooling_data.freq_data[i].mask_val =
2132 pdata->freq_tab[i].mask_val;
2133#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
2134 exynos_sensor_conf.cooling_data.freq_data[i].mask_val_cluster0 =
2135 pdata->freq_tab[i].mask_val_cluster0;
2136#endif
2137 } else
2138 exynos_sensor_conf.cooling_data.freq_data[i].mask_val =
2139 cpu_all_mask;
7e0b55e6
ADK
2140 }
2141
3c2a0909
S
2142 exynos_sensor_conf.cooling_data.size[THERMAL_TRIP_ACTIVE] = pdata->size[THERMAL_TRIP_ACTIVE];
2143 exynos_sensor_conf.cooling_data.size[THERMAL_TRIP_PASSIVE] = pdata->size[THERMAL_TRIP_PASSIVE];
2144
2145 register_pm_notifier(&exynos_pm_nb);
2146#if (defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)) && defined(CONFIG_CPU_IDLE)
2147 exynos_pm_register_notifier(&exynos_pm_dstop_nb);
2148#endif
2149 ret = sysfs_create_group(&pdev->dev.kobj, &exynos_thermal_sensor_attr_group);
2150 if (ret)
2151 dev_err(&pdev->dev, "cannot create thermal sensor attributes\n");
2152
2153 is_cpu_hotplugged_out = false;
bbf63be4 2154
9d97e5c8 2155 return 0;
3c2a0909
S
2156
2157err_tmu:
9d97e5c8 2158 platform_set_drvdata(pdev, NULL);
3c2a0909
S
2159err_soc_type:
2160err_io_remap:
2161err_get_resource:
2162 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
2163 if (data->irq[i])
2164 free_irq(data->irq[i], data);
2165 }
2166err_request_irq:
2167err_get_irq:
2168 kfree(data);
2169
9d97e5c8
DK
2170 return ret;
2171}
2172
4eab7a9e 2173static int exynos_tmu_remove(struct platform_device *pdev)
9d97e5c8 2174{
3c2a0909 2175 int i;
9d97e5c8 2176
3c2a0909
S
2177 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
2178 exynos_tmu_control(pdev, i, false);
9d97e5c8 2179
3c2a0909
S
2180 unregister_pm_notifier(&exynos_pm_nb);
2181#if (defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)) && defined(CONFIG_CPU_IDLE)
2182 exynos_pm_unregister_notifier(&exynos_pm_dstop_nb);
2183#endif
7e0b55e6
ADK
2184 exynos_unregister_thermal();
2185
9d97e5c8
DK
2186 platform_set_drvdata(pdev, NULL);
2187
9d97e5c8
DK
2188 return 0;
2189}
2190
08cd6753 2191#ifdef CONFIG_PM_SLEEP
f22d9c03 2192static int exynos_tmu_suspend(struct device *dev)
9d97e5c8 2193{
3c2a0909
S
2194 int i;
2195
2196 for (i = 0; i < EXYNOS_TMU_COUNT; i++)
2197 exynos_tmu_control(to_platform_device(dev), i, false);
9d97e5c8
DK
2198
2199 return 0;
2200}
2201
f22d9c03 2202static int exynos_tmu_resume(struct device *dev)
9d97e5c8 2203{
08cd6753 2204 struct platform_device *pdev = to_platform_device(dev);
3c2a0909 2205 int i;
08cd6753 2206
3c2a0909
S
2207 for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
2208 exynos_tmu_initialize(pdev, i);
2209 exynos_tmu_control(pdev, i, true);
2210 }
9d97e5c8
DK
2211
2212 return 0;
2213}
08cd6753 2214
/*
 * Dev PM ops: wired to the suspend/resume hooks above when
 * CONFIG_PM_SLEEP is enabled; otherwise the driver advertises no
 * PM callbacks (EXYNOS_TMU_PM is NULL).
 */
static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif
2221
/*
 * Platform driver glue: devices bind either through the device tree
 * (exynos_tmu_match) or through the platform-device id table
 * (exynos_tmu_driver_ids).
 */
static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = of_match_ptr(exynos_tmu_match),
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
	.id_table = exynos_tmu_driver_ids,
};
2233
f22d9c03 2234module_platform_driver(exynos_tmu_driver);
9d97e5c8 2235
f22d9c03 2236MODULE_DESCRIPTION("EXYNOS TMU Driver");
9d97e5c8
DK
2237MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
2238MODULE_LICENSE("GPL");
f22d9c03 2239MODULE_ALIAS("platform:exynos-tmu");