/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug debug mechanism                                                 *
*******************************************************************************/
#include <linux/kallsyms.h>
/******************************************************************************/
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
#include <mtlbprof/mtlbprof.h>
#endif

#include "smpboot.h"

/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug and idle integration                                            *
*******************************************************************************/
atomic_t is_in_hotplug = ATOMIC_INIT(0);
void __attribute__((weak)) spm_mcdi_wakeup_all_cores(void) {}
/******************************************************************************/

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask and cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

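/*
 * Illustrative sketch: how a caller is expected to bracket a
 * cpu_present_mask update with the pair above.  example_mark_present()
 * is a hypothetical helper, shown only to make the locking rule concrete.
 */
static void __maybe_unused example_mark_present(unsigned int cpu)
{
	cpu_maps_update_begin();	/* takes cpu_add_remove_lock */
	set_cpu_present(cpu, true);	/* the update is serialized here */
	cpu_maps_update_done();		/* releases the lock */
}
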
/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug debug mechanism                                                 *
*******************************************************************************/
#if defined(MTK_CPU_HOTPLUG_DEBUG_1) || defined(MTK_CPU_HOTPLUG_DEBUG_2)
RAW_NOTIFIER_HEAD(cpu_chain);
#else
static RAW_NOTIFIER_HEAD(cpu_chain);
#endif
/******************************************************************************/

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
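
/*
 * Illustrative sketch: a typical read-side user of the refcount above.
 * While get_online_cpus() is held, no CPU can come or go, so walking
 * cpu_online_mask is safe.  example_count_online() is a hypothetical
 * helper, shown only to make the usage pattern concrete.
 */
static unsigned int __maybe_unused example_count_online(void)
{
	unsigned int cpu, n = 0;

	get_online_cpus();		/* bumps cpu_hotplug.refcount */
	for_each_online_cpu(cpu)	/* mask is stable in this section */
		n++;
	put_online_cpus();		/* may wake a waiting writer */
	return n;
}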

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice, since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}

/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug and idle integration                                            *
*******************************************************************************/
	atomic_inc(&is_in_hotplug);
	spm_mcdi_wakeup_all_cores();
/******************************************************************************/
}

static void cpu_hotplug_done(void)
{
/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug and idle integration                                            *
*******************************************************************************/
	atomic_dec(&is_in_hotplug);
/******************************************************************************/

	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif /* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

/*******************************************************************************
* 20131225 marc.huang                                                         *
* CPU Hotplug debug mechanism                                                 *
*******************************************************************************/
#ifdef MTK_CPU_HOTPLUG_DEBUG_0
	static int index = 0;
#ifdef CONFIG_KALLSYMS
	char namebuf[128] = {0};
	const char *symname;

	symname = kallsyms_lookup((unsigned long)nb->notifier_call,
				  NULL, NULL, NULL, namebuf);
	if (symname)
		printk("[cpu_ntf] <%02d>%08lx (%s)\n", index++,
		       (unsigned long)nb->notifier_call, symname);
	else
		printk("[cpu_ntf] <%02d>%08lx\n", index++,
		       (unsigned long)nb->notifier_call);
#else /* #ifdef CONFIG_KALLSYMS */
	printk("[cpu_ntf] <%02d>%08lx\n", index++,
	       (unsigned long)nb->notifier_call);
#endif /* #ifdef CONFIG_KALLSYMS */
#endif /* #ifdef MTK_CPU_HOTPLUG_DEBUG_0 */
/******************************************************************************/

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

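/*
 * Illustrative sketch: a minimal client of register_cpu_notifier().
 * example_cpu_callback() and example_cpu_notifier are hypothetical;
 * masking off CPU_TASKS_FROZEN lets one callback serve both the
 * regular and the suspend/resume (frozen) notification variants.
 */
static int __maybe_unused example_cpu_callback(struct notifier_block *nb,
					       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: cpu%u is up\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("example: cpu%u is down\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};
/* Registration would be: register_cpu_notifier(&example_cpu_notifier); */
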
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

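/*
 * Illustrative sketch: the intended caller of clear_tasks_mm_cpumask()
 * is an architecture's CPU-teardown path, after the CPU has been marked
 * offline.  This is a hedged outline, not any particular arch's code.
 */
static void __maybe_unused example_arch_teardown(unsigned int cpu)
{
	set_cpu_online(cpu, false);	/* CPU must be offline first ... */
	clear_tasks_mm_cpumask(cpu);	/* ... or the relaxed walk is unsafe */
}
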
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
			       "(state = %ld, flags = %x)\n",
			       p->comm, task_pid_nr(p), cpu,
			       p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
	mt_lbprof_update_state(cpu, MT_LBPROF_HOTPLUG_STATE);
#endif

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t *pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
		       "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
		       "parameter\n");
#endif
		return -EINVAL;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
		       "Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
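
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Illustrative sketch: cycling a core through the exported pair above.
 * example_cycle_cpu() is hypothetical; note that both calls return
 * -EBUSY while cpu_hotplug_disabled is set, and cpu_down() also
 * returns -EBUSY for the last online CPU.
 */
static int __maybe_unused example_cycle_cpu(unsigned int cpu)
{
	int err;

	err = cpu_down(cpu);		/* may fail: -EBUSY, -EINVAL, ... */
	if (err)
		return err;
	return cpu_up(cpu);		/* bring the same core back */
}
#endif /* CONFIG_HOTPLUG_CPU */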

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
			       cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
EXPORT_SYMBOL_GPL(disable_nonboot_cpus);

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(enable_nonboot_cpus);

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
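
/*
 * Illustrative sketch of how cpumask_of() consumes this table (the real
 * lookup lives in include/linux/cpumask.h; this paraphrase is for
 * explanation only).  Row 1 + cpu % BITS_PER_LONG has exactly that bit
 * set in its first word; stepping the pointer back by
 * cpu / BITS_PER_LONG words places the bit at position "cpu" of the
 * resulting mask.  All the words backed into are zero, which is why
 * row 0 must stay empty.
 */
static const struct cpumask * __maybe_unused example_cpumask_of(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	return to_cpumask(p - cpu / BITS_PER_LONG);
}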

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void idle_notifier_call_chain(unsigned long val)
{
	atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
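
/*
 * Illustrative sketch: a client of the idle notifier chain above.  The
 * names are hypothetical, and the event values passed as "val" are
 * defined by whatever arch/idle code invokes idle_notifier_call_chain().
 */
static int __maybe_unused example_idle_callback(struct notifier_block *nb,
						unsigned long val, void *unused)
{
	/* react to the idle transition identified by val */
	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused example_idle_nb = {
	.notifier_call = example_idle_callback,
};
/* Registration would be: idle_notifier_register(&example_idle_nb); */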