#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
+/*******************************************************************************
+* 20131225 marc.huang *
+* CPU Hotplug debug mechanism *
+*******************************************************************************/
+#include <linux/kallsyms.h>
+/******************************************************************************/
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+#include <mtlbprof/mtlbprof.h>
+#endif
#include "smpboot.h"
+/*******************************************************************************
+* 20131225 marc.huang *
+* CPU Hotplug and idle integration *
+*******************************************************************************/
+/*
+ * Non-zero while a CPU hotplug operation is in flight: incremented in
+ * cpu_hotplug_begin() and decremented in cpu_hotplug_done().  Low-power/idle
+ * code can test this to avoid deep idle during a topology change.
+ */
+atomic_t is_in_hotplug = ATOMIC_INIT(0);
+/*
+ * Weak no-op stub; presumably overridden by the SPM/MCDI low-power driver to
+ * kick all cores out of deep idle before hotplug proceeds -- TODO confirm.
+ */
+void __attribute__((weak)) spm_mcdi_wakeup_all_cores(void) {}
+/******************************************************************************/
+
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
mutex_unlock(&cpu_add_remove_lock);
}
+/*******************************************************************************
+* 20131225 marc.huang *
+* CPU Hotplug debug mechanism *
+*******************************************************************************/
+/*
+ * Notifier chain run on CPU hotplug transitions.  With the MTK hotplug debug
+ * options enabled the head loses its 'static' so code outside this file can
+ * reference the chain for debugging; otherwise it keeps mainline file scope.
+ */
+#if defined(MTK_CPU_HOTPLUG_DEBUG_1) || defined(MTK_CPU_HOTPLUG_DEBUG_2)
+RAW_NOTIFIER_HEAD(cpu_chain);
+#else
static RAW_NOTIFIER_HEAD(cpu_chain);
+#endif
+/******************************************************************************/
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
mutex_unlock(&cpu_hotplug.lock);
schedule();
}
+
+/*******************************************************************************
+* 20131225 marc.huang *
+* CPU Hotplug and idle integration *
+*******************************************************************************/
+ /*
+ * Flag the start of a hotplug operation (balanced by the atomic_dec() in
+ * cpu_hotplug_done()) and wake every core so none sits in a deep idle
+ * state while the CPU topology changes.
+ */
+ atomic_inc(&is_in_hotplug);
+ spm_mcdi_wakeup_all_cores();
+/******************************************************************************/
}
static void cpu_hotplug_done(void)
{
+/*******************************************************************************
+* 20131225 marc.huang *
+* CPU Hotplug and idle integration *
+*******************************************************************************/
+ /* End of hotplug operation: pairs with atomic_inc() in cpu_hotplug_begin(). */
+ atomic_dec(&is_in_hotplug);
+/******************************************************************************/
+
cpu_hotplug.active_writer = NULL;
mutex_unlock(&cpu_hotplug.lock);
}
+/*
+ * Wait for currently running CPU hotplug operations to complete (if any) and
+ * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
+ * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
+ * hotplug path before performing hotplug operations. So acquiring that lock
+ * guarantees mutual exclusion from any currently running hotplug operations.
+ *
+ * Each call must be balanced by a later cpu_hotplug_enable().
+ */
+void cpu_hotplug_disable(void)
+{
+ cpu_maps_update_begin();
+ cpu_hotplug_disabled = 1;
+ cpu_maps_update_done();
+}
+
+/*
+ * Re-enable CPU hotplug after a cpu_hotplug_disable(); clears the
+ * 'cpu_hotplug_disabled' flag under 'cpu_add_remove_lock'.
+ */
+void cpu_hotplug_enable(void)
+{
+ cpu_maps_update_begin();
+ cpu_hotplug_disabled = 0;
+ cpu_maps_update_done();
+}
+
#else /* #if CONFIG_HOTPLUG_CPU */
+/* !CONFIG_HOTPLUG_CPU: hotplug begin/done are no-ops. */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
int __ref register_cpu_notifier(struct notifier_block *nb)
{
int ret;
+
+/*******************************************************************************
+* 20131225 marc.huang *
+* CPU Hotplug debug mechanism *
+*******************************************************************************/
+#ifdef MTK_CPU_HOTPLUG_DEBUG_0
+ /* Sequence number for the registration trace below (debug builds only). */
+ static int index = 0;
+ #ifdef CONFIG_KALLSYMS
+ char namebuf[128] = {0};
+ const char *symname;
+
+ /* Resolve the callback address to a symbol name for readable logs. */
+ symname = kallsyms_lookup((unsigned long)nb->notifier_call, NULL, NULL, NULL, namebuf);
+ /* NOTE(review): printk() calls here lack a KERN_* level; pr_info() preferred. */
+ if (symname)
+ printk("[cpu_ntf] <%02d>%08lx (%s)\n", index++, (unsigned long)nb->notifier_call, symname);
+ else
+ printk("[cpu_ntf] <%02d>%08lx\n", index++, (unsigned long)nb->notifier_call);
+ #else //#ifdef CONFIG_KALLSYMS
+ printk("[cpu_ntf] <%02d>%08lx\n", index++, (unsigned long)nb->notifier_call);
+ #endif //#ifdef CONFIG_KALLSYMS
+#endif //#ifdef MTK_CPU_HOTPLUG_DEBUG_0
+/******************************************************************************/
+
cpu_maps_update_begin();
ret = raw_notifier_chain_register(&cpu_chain, nb);
cpu_maps_update_done();
while (!idle_cpu(cpu))
cpu_relax();
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+ /* Record that this CPU entered the hotplug state for the MTK LB profiler. */
+ mt_lbprof_update_state(cpu, MT_LBPROF_HOTPLUG_STATE);
+#endif
+
/* This actually kills the CPU. */
__cpu_die(cpu);
cpu_maps_update_done();
return error;
}
+EXPORT_SYMBOL_GPL(disable_nonboot_cpus);
void __weak arch_enable_nonboot_cpus_begin(void)
{
out:
cpu_maps_update_done();
}
+EXPORT_SYMBOL_GPL(enable_nonboot_cpus);
static int __init alloc_frozen_cpus(void)
{
}
core_initcall(alloc_frozen_cpus);
-/*
- * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
- * hotplug when tasks are about to be frozen. Also, don't allow the freezer
- * to continue until any currently running CPU hotplug operation gets
- * completed.
- * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
- * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
- * CPU hotplug path and released only after it is complete. Thus, we
- * (and hence the freezer) will block here until any currently running CPU
- * hotplug operation gets completed.
- */
-void cpu_hotplug_disable_before_freeze(void)
-{
- cpu_maps_update_begin();
- cpu_hotplug_disabled = 1;
- cpu_maps_update_done();
-}
-
-
-/*
- * When tasks have been thawed, re-enable regular CPU hotplug (which had been
- * disabled while beginning to freeze tasks).
- */
-void cpu_hotplug_enable_after_thaw(void)
-{
- cpu_maps_update_begin();
- cpu_hotplug_disabled = 0;
- cpu_maps_update_done();
-}
-
/*
* When callbacks for CPU hotplug notifications are being executed, we must
* ensure that the state of the system with respect to the tasks being frozen
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
- cpu_hotplug_disable_before_freeze();
+ cpu_hotplug_disable();
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
- cpu_hotplug_enable_after_thaw();
+ cpu_hotplug_enable();
break;
default:
void set_cpu_online(unsigned int cpu, bool online)
{
- if (online)
+ if (online) {
cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
- else
+ /*
+ * Deviation from mainline: bringing a CPU online also marks it
+ * active here.  The offline path does NOT clear the active bit in
+ * this function -- presumably handled via set_cpu_active(); confirm.
+ */
+ cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+ } else {
cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+ }
}
void set_cpu_active(unsigned int cpu, bool active)
{
cpumask_copy(to_cpumask(cpu_online_bits), src);
}
+
+/* Chain of callbacks run on idle-state events; see idle_notifier_call_chain(). */
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+/* Add @n to the idle notifier chain. */
+void idle_notifier_register(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+/* Remove @n from the idle notifier chain. */
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+/*
+ * Invoke every registered idle callback with event @val (no extra data).
+ * Presumably called from the architecture idle loop -- TODO confirm caller.
+ */
+void idle_notifier_call_chain(unsigned long val)
+{
+ atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);