kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);

        if (WARN_ON(!cpu_hotplug.refcount))
                cpu_hotplug.refcount++; /* try to fix things up */

        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
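
/*
 * Illustrative sketch (not part of the original file): a typical read-side
 * section built on the pair above, keeping cpu_online_mask stable while it
 * is walked.  The helper name is hypothetical.
 */
static unsigned int __maybe_unused example_count_online_cpus(void)
{
        unsigned int cpu, count = 0;

        get_online_cpus();              /* hold off cpu_hotplug_begin() */
        for_each_online_cpu(cpu)        /* the online mask cannot change here */
                count++;
        put_online_cpus();              /* drop the ref; wake a waiting writer */

        return count;
}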

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif /* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}
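
/*
 * Illustrative sketch (not part of the original file): defining and
 * registering a CPU hotplug notifier with the API above.  All "example_*"
 * names are hypothetical.
 */
static int example_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                printk(KERN_DEBUG "example: cpu %u came online\n", cpu);
                break;
        case CPU_DEAD:
                printk(KERN_DEBUG "example: cpu %u went away\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __maybe_unused = {
        .notifier_call = example_cpu_callback,
};
/* A caller would then do: register_cpu_notifier(&example_cpu_notifier); */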

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}
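
/*
 * Illustrative sketch (not part of the original file): where an architecture
 * might call the helper above while taking a CPU down.  Loosely modeled on an
 * arch __cpu_disable() implementation; the details are hypothetical and the
 * block is fenced off so it is never compiled.
 */
#if 0
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        set_cpu_online(cpu, false);     /* mark the CPU offline first ... */

        /* ... migrate IRQs away, flush caches and TLBs ... */

        clear_tasks_mm_cpumask(cpu);    /* ... then drop it from every mm_cpumask */
        return 0;
}
#endif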

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;
        cputime_t utime, stime;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                task_cputime(p, &utime, &stime);
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (utime || stime))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                               "(state = %ld, flags = %x)\n",
                               p->comm, task_pid_nr(p), cpu,
                               p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk(KERN_WARNING "%s: attempt to take down CPU %u failed\n",
                       __func__, cpu);
                goto out_release;
        }
        smpboot_park_threads(cpu);

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone. Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone. Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                       __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per cpu threads */
        smpboot_unpark_threads(cpu);

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                       "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                       "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                       "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
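
/*
 * Illustrative sketch (not part of the original file): cycling a CPU off and
 * back on, e.g. from a driver or a test module.  The helper name is
 * hypothetical; cpu_down() exists only with CONFIG_HOTPLUG_CPU.
 */
#ifdef CONFIG_HOTPLUG_CPU
static int __maybe_unused example_cycle_cpu(unsigned int cpu)
{
        int err;

        err = cpu_down(cpu);    /* runs CPU_DOWN_PREPARE .. CPU_POST_DEAD */
        if (err)
                return err;

        return cpu_up(cpu);     /* runs CPU_UP_PREPARE .. CPU_ONLINE */
}
#endif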

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        printk(KERN_INFO "Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                               cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * Prevent regular CPU hotplug from racing with the freezer by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 *
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. This same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable_before_freeze();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable_after_thaw();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than the x86
         * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
         * disabling CPU hotplug first, to avoid a hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}
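
/*
 * Illustrative sketch (not part of the original file): the calling convention
 * documented above, as an architecture's secondary-CPU entry path might follow
 * it.  Names and details beyond notify_cpu_starting() are hypothetical; the
 * block is fenced off so it is never compiled.
 */
#if 0
void example_secondary_start_kernel(unsigned int cpu)
{
        /* ... arch-specific setup: MMU, per-cpu areas, local timers ... */

        notify_cpu_starting(cpu);       /* CPU_STARTING, interrupts still off */
        set_cpu_online(cpu, true);      /* lets the boot CPU's __cpu_up() finish */
        local_irq_enable();             /* only now start taking interrupts */

        /* ... fall into the idle loop ... */
}
#endif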

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents the single-bit bitmap value 1<<nr for every one of the
 * NR_CPUS bit positions.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
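
/*
 * Illustrative sketch (not part of the original file): how cpumask_of() maps
 * a CPU number onto cpu_bit_bitmap[] above.  This mirrors get_cpu_mask() in
 * <linux/cpumask.h>; the helper name here is hypothetical.
 */
static __maybe_unused const struct cpumask *example_single_cpu_mask(unsigned int cpu)
{
        /* Row 1 + (cpu % BITS_PER_LONG) has word 0 set to 1UL << (cpu % BITS_PER_LONG). */
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

        /* Step back so that bit lands at bitmap word cpu / BITS_PER_LONG. */
        p -= cpu / BITS_PER_LONG;
        return to_cpumask(p);
}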

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}