/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
/*******************************************************************************
 * 20131225 marc.huang                                                         *
 * CPU Hotplug debug mechanism                                                 *
 *******************************************************************************/
#include <linux/kallsyms.h>
/******************************************************************************/
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
#include <mtlbprof/mtlbprof.h>
#endif

#include "smpboot.h"

/*******************************************************************************
 * 20131225 marc.huang                                                         *
 * CPU Hotplug and idle integration                                            *
 *******************************************************************************/
atomic_t is_in_hotplug = ATOMIC_INIT(0);
void __attribute__((weak)) spm_mcdi_wakeup_all_cores(void) {}
/******************************************************************************/

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
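
/*
 * Illustrative sketch (not part of this file): a hypothetical helper that
 * marks a CPU present would bracket the cpumask update with the two calls
 * above so it cannot race with cpu_up()/cpu_down().
 */
#if 0
static void example_mark_cpu_present(unsigned int cpu)
{
	cpu_maps_update_begin();
	set_cpu_present(cpu, true);
	cpu_maps_update_done();
}
#endif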

/*******************************************************************************
 * 20131225 marc.huang                                                         *
 * CPU Hotplug debug mechanism                                                 *
 *******************************************************************************/
#if defined(MTK_CPU_HOTPLUG_DEBUG_1) || defined(MTK_CPU_HOTPLUG_DEBUG_2)
RAW_NOTIFIER_HEAD(cpu_chain);
#else
static RAW_NOTIFIER_HEAD(cpu_chain);
#endif
/******************************************************************************/

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
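
/*
 * Illustrative sketch (not part of this file): a hypothetical reader that
 * needs a stable cpu_online_mask while it walks the online CPUs wraps the
 * walk in get_online_cpus()/put_online_cpus(); the pair only manipulates a
 * refcount, so it may nest and may sleep.
 */
#if 0
static void example_count_online(void)
{
	unsigned int cpu, n = 0;

	get_online_cpus();
	for_each_online_cpu(cpu)
		n++;
	pr_info("example: %u CPUs online\n", n);
	put_online_cpus();
}
#endif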

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}

/*******************************************************************************
 * 20131225 marc.huang                                                         *
 * CPU Hotplug and idle integration                                            *
 *******************************************************************************/
	atomic_inc(&is_in_hotplug);
	spm_mcdi_wakeup_all_cores();
/******************************************************************************/
}

static void cpu_hotplug_done(void)
{
/*******************************************************************************
 * 20131225 marc.huang                                                         *
 * CPU Hotplug and idle integration                                            *
 *******************************************************************************/
	atomic_dec(&is_in_hotplug);
/******************************************************************************/

	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
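
/*
 * Illustrative sketch (not part of this file): every writer pairs the two
 * calls around the actual hotplug work, exactly as _cpu_down()/_cpu_up()
 * below do. cpu_hotplug_begin() returns with cpu_hotplug.lock held, so new
 * readers block until cpu_hotplug_done() releases it.
 */
#if 0
static int example_hotplug_writer(void)
{
	int err = 0;

	cpu_hotplug_begin();
	/* ... perform the online/offline steps ... */
	cpu_hotplug_done();
	return err;
}
#endif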

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
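
/*
 * Illustrative sketch (not part of this file): a hypothetical suspend-like
 * path would disable hotplug around its critical section and re-enable it
 * afterwards; cpu_up()/cpu_down() return -EBUSY in between.
 */
#if 0
static void example_no_hotplug_section(void)
{
	cpu_hotplug_disable();
	/* ... work that must not race with CPU hotplug ... */
	cpu_hotplug_enable();
}
#endif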

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

/*******************************************************************************
 * 20131225 marc.huang                                                         *
 * CPU Hotplug debug mechanism                                                 *
 *******************************************************************************/
#ifdef MTK_CPU_HOTPLUG_DEBUG_0
	static int index = 0;
#ifdef CONFIG_KALLSYMS
	char namebuf[128] = {0};
	const char *symname;

	symname = kallsyms_lookup((unsigned long)nb->notifier_call, NULL, NULL, NULL, namebuf);
	if (symname)
		printk("[cpu_ntf] <%02d>%08lx (%s)\n", index++, (unsigned long)nb->notifier_call, symname);
	else
		printk("[cpu_ntf] <%02d>%08lx\n", index++, (unsigned long)nb->notifier_call);
#else //#ifdef CONFIG_KALLSYMS
	printk("[cpu_ntf] <%02d>%08lx\n", index++, (unsigned long)nb->notifier_call);
#endif //#ifdef CONFIG_KALLSYMS
#endif //#ifdef MTK_CPU_HOTPLUG_DEBUG_0
/******************************************************************************/

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}
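
/*
 * Illustrative sketch (not part of this file): a hypothetical subsystem
 * registers a notifier_block whose callback switches on the CPU_* action
 * codes delivered through this chain (CPU_TASKS_FROZEN is masked off so
 * the frozen variants share the same cases).
 */
#if 0
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		pr_info("example: preparing CPU%u\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("example: CPU%u is dead\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

static int __init example_cpu_nb_init(void)
{
	return register_cpu_notifier(&example_cpu_nb);
}
#endif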

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
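
/*
 * Illustrative sketch (not part of this file): an architecture's offline
 * path (a hypothetical __cpu_disable()-style helper shown here) would call
 * clear_tasks_mm_cpumask() only after clearing the CPU from
 * cpu_online_mask, as the relaxed locking above requires.
 */
#if 0
static int example_arch_cpu_disable(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	/* ... migrate IRQs away, flush caches, etc. ... */
	clear_tasks_mm_cpumask(cpu);
	return 0;
}
#endif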

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
	mt_lbprof_update_state(cpu, MT_LBPROF_HOTPLUG_STATE);
#endif

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
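
/*
 * Illustrative sketch (not part of this file): a hypothetical driver or
 * sysfs handler simply calls cpu_down() and checks the return value;
 * -EBUSY means hotplug is currently disabled or the last online CPU was
 * targeted.
 */
#if 0
static int example_offline_cpu(unsigned int cpu)
{
	int err = cpu_down(cpu);

	if (err)
		pr_warn("example: failed to offline CPU%u: %d\n", cpu, err);
	return err;
}
#endif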
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t *pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
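
/*
 * Illustrative sketch (not part of this file): the matching online helper;
 * cpu_up() only accepts CPUs that were marked possible at boot.
 */
#if 0
static int example_online_cpu(unsigned int cpu)
{
	int err = cpu_up(cpu);

	if (err)
		pr_warn("example: failed to online CPU%u: %d\n", cpu, err);
	return err;
}
#endif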

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
EXPORT_SYMBOL_GPL(disable_nonboot_cpus);

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(enable_nonboot_cpus);
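
/*
 * Illustrative sketch (not part of this file): a hypothetical suspend
 * sequence pairs the two calls around the period where only the boot CPU
 * may run; the frozen_cpus mask remembers which CPUs to bring back.
 */
#if 0
static int example_suspend_cpus(void)
{
	int err = disable_nonboot_cpus();

	if (err)
		return err;
	/* ... enter the low-power state with only the boot CPU online ... */
	enable_nonboot_cpus();
	return 0;
}
#endif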

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
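
/*
 * Illustrative sketch (not part of this file): a hypothetical subsystem
 * that must run its PM_SUSPEND_PREPARE work only after hotplug has been
 * disabled here would register its own PM notifier with a priority below
 * the 0 used above (the priority value shown is just an example).
 */
#if 0
static int example_pm_callback(struct notifier_block *nb,
			       unsigned long action, void *ptr)
{
	if (action == PM_SUSPEND_PREPARE)
		pr_info("example: CPU hotplug is already disabled here\n");
	return NOTIFY_OK;
}

static int __init example_pm_sync_init(void)
{
	pm_notifier(example_pm_callback, -1);
	return 0;
}
#endif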

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
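
/*
 * Illustrative sketch (not part of this file): architecture secondary-boot
 * code (a hypothetical secondary start routine shown here) calls
 * notify_cpu_starting() with interrupts still disabled, before marking
 * itself online.
 */
#if 0
static void example_secondary_start(unsigned int cpu)
{
	/* ... per-CPU setup on the freshly started CPU ... */
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	local_irq_enable();
}
#endif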

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
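
/*
 * Illustrative sketch (not part of this file): cpumask_of(cpu) resolves to
 * something like the helper below (cf. get_cpu_mask() in
 * <linux/cpumask.h>): pick the row holding bit (cpu % BITS_PER_LONG) and
 * then step *backwards* by cpu / BITS_PER_LONG longs, which is why row 0
 * above is left empty.
 */
#if 0
static inline const struct cpumask *example_get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif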

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void idle_notifier_call_chain(unsigned long val)
{
	atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
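
/*
 * Illustrative sketch (not part of this file): a hypothetical driver that
 * wants to react to idle-state transitions registers a notifier_block
 * here; the architecture idle loop is expected to fire the chain via
 * idle_notifier_call_chain() around its low-power entry and exit.
 */
#if 0
static int example_idle_callback(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	pr_debug("example: idle event %lu\n", val);
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_callback,
};

static int __init example_idle_init(void)
{
	idle_notifier_register(&example_idle_nb);
	return 0;
}
#endif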