From: Hyeonseong Gil
Date: Mon, 15 May 2017 07:31:16 +0000 (+0900)
Subject: [COMMON] thermal: core: Change to use their own workqueue
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=816912ef991a896b948742b792bd9ddc461f18f8;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

[COMMON] thermal: core: Change to use their own workqueue

To prevent delayed scheduling on the shared workqueue, use a dedicated
workqueue so that the polling work can be handled on all of the LITTLE
cores.

Resolved migration conflicts from kernel 4.9 to 4.14.

Change-Id: I1520ba4d7877fd9abf0edbc14693319ba27cc915
Signed-off-by: Hyeonseong Gil
---

diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 352ecb713358..1fdd6992324e 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -50,11 +51,12 @@ static DEFINE_MUTEX(poweroff_lock);
 static atomic_t in_suspend;
 static bool power_off_triggered;
 
+static struct workqueue_struct *thermal_wq;
+
 #ifdef CONFIG_SCHED_HMP
-#define BOUNDED_CPU 1
 static void start_poll_queue(struct thermal_zone_device *tz, int delay)
 {
-	mod_delayed_work_on(tz->poll_queue_cpu, system_freezable_wq, &tz->poll_queue,
+	mod_delayed_work(thermal_wq, &tz->poll_queue,
 			msecs_to_jiffies(delay));
 }
 #endif
@@ -1246,9 +1248,6 @@ thermal_zone_device_register(const char *type, int trips, int mask,
 	tz->trips = trips;
 	tz->passive_delay = passive_delay;
 	tz->polling_delay = polling_delay;
-#ifdef CONFIG_SCHED_HMP
-	tz->poll_queue_cpu = BOUNDED_CPU;
-#endif
 
 	/* sys I/F */
 	/* Add nodes that are always present via .groups */
@@ -1547,43 +1546,6 @@ static inline int genetlink_init(void) { return 0; }
 static inline void genetlink_exit(void) {}
 #endif /* !CONFIG_NET */
 
-#ifdef CONFIG_SCHED_HMP
-static int thermal_cpu_callback(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
-{
-	unsigned long cpu = (unsigned long)hcpu;
-	struct thermal_zone_device *pos;
-
-	switch (action) {
-	case CPU_ONLINE:
-		if (cpu == BOUNDED_CPU) {
-			list_for_each_entry(pos, &thermal_tz_list, node) {
-				pos->poll_queue_cpu = BOUNDED_CPU;
-				if (pos->polling_delay) {
-					start_poll_queue(pos, pos->polling_delay);
-				}
-			}
-		}
-		break;
-	case CPU_DOWN_PREPARE:
-		list_for_each_entry(pos, &thermal_tz_list, node) {
-			if (pos->poll_queue_cpu == cpu) {
-				pos->poll_queue_cpu = 0;
-				if (pos->polling_delay)
-					start_poll_queue(pos, pos->polling_delay);
-			}
-		}
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block thermal_cpu_notifier =
-{
-	.notifier_call = thermal_cpu_callback,
-};
-#endif
-
 static int thermal_pm_notify(struct notifier_block *nb,
 			     unsigned long mode, void *_unused)
 {
@@ -1618,8 +1580,19 @@ static struct notifier_block thermal_pm_nb = {
 static int __init thermal_init(void)
 {
 	int result;
+	struct workqueue_attrs attr;
+
+	attr.nice = 0;
+	attr.no_numa = true;
+	cpumask_copy(attr.cpumask, &hmp_slow_cpu_mask);
+
+	thermal_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |\
+				WQ_MEM_RECLAIM | WQ_FREEZABLE,
+				0, "thermal_check");
+	apply_workqueue_attrs(thermal_wq, &attr);
 
 	mutex_init(&poweroff_lock);
+
 	result = thermal_register_governors();
 	if (result)
 		goto error;
@@ -1636,9 +1609,6 @@ static int __init thermal_init(void)
 	if (result)
 		goto exit_netlink;
 
-#ifdef CONFIG_SCHED_HMP
-	register_hotcpu_notifier(&thermal_cpu_notifier);
-#endif
 	result = register_pm_notifier(&thermal_pm_nb);
 	if (result)
 		pr_warn("Thermal: Can not register suspend notifier, return %d\n",
@@ -1664,9 +1634,6 @@ error:
 static void __exit thermal_exit(void)
 {
 	unregister_pm_notifier(&thermal_pm_nb);
-#ifdef CONFIG_SCHED_HMP
-	unregister_hotcpu_notifier(&thermal_cpu_notifier);
-#endif
 	of_thermal_destroy_zones();
 	genetlink_exit();
 	class_unregister(&thermal_class);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 3cdd38497349..a6fc833258f6 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -239,7 +239,6 @@ struct thermal_zone_device {
 	struct list_head node;
 	struct delayed_work poll_queue;
 	enum thermal_notify_event notify_event;
-	int poll_queue_cpu;
 };
 
 /**
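
For reference, a minimal sketch of the same cpumask binding done with the stock
workqueue API, assuming a 4.14-era kernel. It differs from the thermal_init()
hunk above in that the attrs are obtained from alloc_workqueue_attrs(), so the
embedded cpumask_var_t is valid even with CONFIG_CPUMASK_OFFSTACK=y (a stack
struct workqueue_attrs only works when that option is off), and the return
values are checked. The helper name thermal_wq_init() and its error handling
are illustrative only; thermal_wq, the queue flags and the LITTLE-cluster mask
(hmp_slow_cpu_mask in the patch) are taken from the commit itself.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>

static struct workqueue_struct *thermal_wq;

/* Illustrative helper, not part of the commit above. */
static int thermal_wq_init(const struct cpumask *little_mask)
{
	struct workqueue_attrs *attrs;
	int ret;

	/* Unbound, high-priority queue with the same flags as the patch. */
	thermal_wq = alloc_workqueue("thermal_check",
				     WQ_HIGHPRI | WQ_UNBOUND |
				     WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
	if (!thermal_wq)
		return -ENOMEM;

	/* alloc_workqueue_attrs() also allocates attrs->cpumask. */
	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto err_wq;
	}

	attrs->nice = 0;
	attrs->no_numa = true;
	cpumask_copy(attrs->cpumask, little_mask);

	/* Restrict the unbound workers to the LITTLE cluster. */
	ret = apply_workqueue_attrs(thermal_wq, attrs);
	free_workqueue_attrs(attrs);
	if (ret)
		goto err_wq;

	return 0;

err_wq:
	destroy_workqueue(thermal_wq);
	thermal_wq = NULL;
	return ret;
}

Polling work is then queued with mod_delayed_work(thermal_wq, &tz->poll_queue,
msecs_to_jiffies(delay)), exactly as start_poll_queue() does in the patch.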