/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug debug mechanism                                                 *
*******************************************************************************/
#include <linux/kallsyms.h>
/******************************************************************************/
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
#include <mtlbprof/mtlbprof.h>
#endif

#include "smpboot.h"
/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug and idle integration                                            *
*******************************************************************************/
atomic_t is_in_hotplug = ATOMIC_INIT(0);
void __attribute__((weak)) spm_mcdi_wakeup_all_cores(void) {}
/******************************************************************************/
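
/*
 * Usage sketch (illustration only; the helper name below is hypothetical):
 * platform idle/MCDI code can poll is_in_hotplug to avoid entering a deep
 * idle state while a hotplug transition is in flight.
 *
 *	static bool cpu_hotplug_in_progress(void)
 *	{
 *		return atomic_read(&is_in_hotplug) != 0;
 *	}
 */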
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug debug mechanism                                                 *
*******************************************************************************/
#if defined(MTK_CPU_HOTPLUG_DEBUG_1) || defined(MTK_CPU_HOTPLUG_DEBUG_2)
RAW_NOTIFIER_HEAD(cpu_chain);
#else
static RAW_NOTIFIER_HEAD(cpu_chain);
#endif
/******************************************************************************/

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
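
/*
 * Usage sketch (illustration only; the function name is hypothetical, the
 * APIs are the ones above): a reader-side caller pins the set of online CPUs
 * via the refcount taken in get_online_cpus() before walking
 * cpu_online_mask, then drops it again.
 *
 *	static void walk_online_cpus_example(void)
 *	{
 *		int cpu;
 *
 *		get_online_cpus();
 *		for_each_online_cpu(cpu)
 *			pr_info("cpu %d is online\n", cpu);
 *		put_online_cpus();
 *	}
 */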
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}

/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug and idle integration                                            *
*******************************************************************************/
	atomic_inc(&is_in_hotplug);
	spm_mcdi_wakeup_all_cores();
/******************************************************************************/
}
static void cpu_hotplug_done(void)
{
/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug and idle integration                                            *
*******************************************************************************/
	atomic_dec(&is_in_hotplug);
/******************************************************************************/

	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug debug mechanism                                                 *
*******************************************************************************/
#ifdef MTK_CPU_HOTPLUG_DEBUG_0
	static int index = 0;
#ifdef CONFIG_KALLSYMS
	char namebuf[128] = {0};
	const char *symname;

	symname = kallsyms_lookup((unsigned long)nb->notifier_call, NULL, NULL, NULL, namebuf);
	if (symname)
		printk("[cpu_ntf] <%02d>%08lx (%s)\n", index++, (unsigned long)nb->notifier_call, symname);
	else
		printk("[cpu_ntf] <%02d>%08lx\n", index++, (unsigned long)nb->notifier_call);
#else //#ifdef CONFIG_KALLSYMS
	printk("[cpu_ntf] <%02d>%08lx\n", index++, (unsigned long)nb->notifier_call);
#endif //#ifdef CONFIG_KALLSYMS
#endif //#ifdef MTK_CPU_HOTPLUG_DEBUG_0
/******************************************************************************/

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}
static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
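
/*
 * Usage sketch (illustration only; the callback and notifier_block names are
 * hypothetical, the notifier API and CPU_ONLINE/CPU_DEAD actions are real):
 * a minimal CPU hotplug notifier as a driver would register it.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			pr_info("cpu %u came online\n", cpu);
 *			break;
 *		case CPU_DEAD:
 *			pr_info("cpu %u went down\n", cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_notifier = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&example_cpu_notifier);
 */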
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;
	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
	mt_lbprof_update_state(cpu, MT_LBPROF_HOTPLUG_STATE);
#endif

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();
	return ret;
}
int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
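
/*
 * Usage sketch (illustration only): cpu_up()/cpu_down() are the
 * kernel-internal equivalents of echoing 1/0 to
 * /sys/devices/system/cpu/cpuN/online. A caller cycling a core would do
 * roughly:
 *
 *	int err = cpu_down(1);
 *	if (!err)
 *		err = cpu_up(1);
 *	if (err)
 *		pr_err("failed to cycle cpu1: %d\n", err);
 *
 * Both calls serialize on cpu_add_remove_lock internally and fail with
 * -EBUSY while cpu_hotplug_disabled is set.
 */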
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
EXPORT_SYMBOL_GPL(disable_nonboot_cpus);
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(enable_nonboot_cpus);
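
/*
 * Illustrative note (a sketch of the caller, not part of this file): the
 * suspend/hibernate core pairs these two helpers around the window in which
 * only the boot CPU may run, roughly:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... enter the suspended state with one CPU online ...
 *	}
 *	enable_nonboot_cpus();
 *
 * On success, disable_nonboot_cpus() also sets cpu_hotplug_disabled, so
 * userspace hotplug requests fail with -EBUSY until enable_nonboot_cpus()
 * clears it.
 */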
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
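
/*
 * Illustrative note (a sketch, not part of the original comments):
 * cpumask_of(cpu) resolves to a pointer into this table. It starts at row
 * [1 + cpu % BITS_PER_LONG], whose first word has exactly bit
 * (cpu % BITS_PER_LONG) set, and is then moved back by cpu / BITS_PER_LONG
 * longs (row 0 is left empty so there is room to back into it), which places
 * that single set bit at position 'cpu' of the returned mask. For example,
 * on a 64-bit kernel:
 *
 *	const struct cpumask *m = cpumask_of(3);
 *	// m points at cpu_bit_bitmap[4]; only bit 3 is set in *m,
 *	// so cpumask_test_cpu(3, m) is true.
 */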
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void idle_notifier_call_chain(unsigned long val)
{
	atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
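
/*
 * Usage sketch (illustration only; the callback and notifier names are
 * hypothetical, the registration API is the one above): the architecture
 * idle loop calls idle_notifier_call_chain() around idle entry/exit, and
 * other code can listen for those transitions.
 *
 *	static int example_idle_callback(struct notifier_block *nb,
 *					 unsigned long val, void *data)
 *	{
 *		// val identifies the idle transition reported by the caller
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_idle_nb = {
 *		.notifier_call = example_idle_callback,
 *	};
 *
 *	idle_notifier_register(&example_idle_nb);
 */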