/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/cpuset.h>

#include <soc/samsung/exynos-emc.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
};
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail,
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	complete(done);
}

/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void cpus_read_lock(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
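
/*
 * Illustrative sketch only (not part of the original file): a read-side
 * section that pins the online CPU set with cpus_read_lock() while walking
 * it. The function name is hypothetical; the block is guarded out so it does
 * not affect the build.
 */
#if 0
static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();		/* block concurrent CPU hotplug writers */
	for_each_online_cpu(cpu)
		cnt++;
	cpus_read_unlock();		/* let cpu_up()/cpu_down() proceed again */

	return cnt;
}
#endif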
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpus_write_lock(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpus_write_unlock(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
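
/*
 * Illustrative sketch only: a caller that must keep the CPU topology stable
 * across a longer operation pairs cpu_hotplug_disable() with
 * cpu_hotplug_enable(). Both the function and do_long_running_update() are
 * hypothetical, and the block is guarded out.
 */
#if 0
static int example_run_with_hotplug_disabled(void)
{
	int ret;

	cpu_hotplug_disable();		/* cpu_up()/cpu_down() now return -EBUSY */
	ret = do_long_running_update();
	cpu_hotplug_enable();		/* re-enable hotplug */

	return ret;
}
#endif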
#endif	/* CONFIG_HOTPLUG_CPU */

static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}
/* Notifier wrappers for transitioning to state machine */

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}
/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	if (WARN_ON_ONCE(!st->should_run))
		return;

	lock_map_acquire(&cpuhp_state_lock_map);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);
		if (step->skip_onerr)
			goto next;
	}

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

next:
	lock_map_release(&cpuhp_state_lock_map);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}
static int cpuhp_fast_kick_ap_work_pre(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	trace_cpuhp_enter(cpu, st->target, prev_state,
			  cpuhp_fast_kick_ap_work_pre);

	cpuhp_set_state(st, st->target);
	if (!st->single && st->state == st->target)
		return prev_state;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);

	return prev_state;
}

static int cpuhp_fast_kick_ap_work_post(unsigned int cpu,
					enum cpuhp_state prev_state)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	wait_for_ap_thread(st, st->bringup);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
	}
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu);
static int takedown_cpus(const struct cpumask *down_cpus)
{
	struct cpuhp_cpu_state *st;
	int err, cpu;

	/* Park the smpboot threads */
	for_each_cpu(cpu, down_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		trace_cpuhp_enter(cpu, st->target, st->state, takedown_cpu);

		kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		smpboot_park_threads(cpu);
	}

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, down_cpus);
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		for_each_cpu(cpu, down_cpus) {
			st = per_cpu_ptr(&cpuhp_state, cpu);
			st->target = st->state;

			/* Unpark the hotplug thread so we can rollback there */
			kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		}
		return err;
	}

	for_each_cpu(cpu, down_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		BUG_ON(cpu_online(cpu));

		/*
		 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
		 * runnable tasks from the cpu, there's only the idle task left now
		 * that the migration thread is done doing the stop_machine thing.
		 *
		 * Wait for the stop thread to go away.
		 */
		wait_for_ap_thread(st, false);
		BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
	}

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	for_each_cpu(cpu, down_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);

		hotplug_cpu__broadcast_tick_pull(cpu);
		/* This actually kills the CPU. */
		__cpu_die(cpu);

		tick_cleanup_dead_cpu(cpu);
		rcutree_migrate_callbacks(cpu);

		trace_cpuhp_exit(cpu, st->state, st->state, st->result);
	}

	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
static int __ref _cpus_down(struct cpumask cpus, int tasks_frozen,
			    enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st;
	cpumask_t ap_work_cpus = CPU_MASK_NONE;
	cpumask_t take_down_cpus = CPU_MASK_NONE;
	int prev_state[8] = {0};
	int ret = 0;
	int cpu;

	if (num_online_cpus() == 1)
		return -EBUSY;

	for_each_cpu(cpu, &cpus)
		if (!cpu_present(cpu))
			return -EINVAL;

	cpus_write_lock();
	cpuhp_tasks_frozen = tasks_frozen;

	cpumask_copy(&cpu_fastoff_mask, &cpus);
	for_each_cpu(cpu, &cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		prev_state[cpu] = cpuhp_set_state(st, target);
		if (st->state > CPUHP_TEARDOWN_CPU)
			cpumask_set_cpu(cpu, &ap_work_cpus);
		else
			cpumask_set_cpu(cpu, &take_down_cpus);
	}

	for_each_cpu(cpu, &ap_work_cpus) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

		emc_cpu_pre_off_callback(cpu);
		set_cpu_active(cpu, false);
		st->state = CPUHP_AP_EXYNOS_IDLE_CTRL;
	}

	cpuset_update_active_cpus();

	for_each_cpu(cpu, &ap_work_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		set_cpu_active(cpu, false);
		st->state = CPUHP_AP_EXYNOS_IDLE_CTRL;
	}

	cpuset_update_active_cpus();

	for_each_cpu(cpu, &ap_work_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		cpuhp_fast_kick_ap_work_pre(cpu);
	}

	for_each_cpu(cpu, &ap_work_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		cpuhp_fast_kick_ap_work_post(cpu, prev_state[cpu]);
		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		st->target = target;
		cpumask_set_cpu(cpu, &take_down_cpus);
	}

	/* Hotplug out of all cpu failed */
	if (cpumask_empty(&take_down_cpus))
		goto out;

	ret = takedown_cpus(&take_down_cpus);
	if (ret)
		panic("%s: failed to takedown_cpus\n", __func__);

	for_each_cpu(cpu, &take_down_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		ret = cpuhp_down_callbacks(cpu, st, target);
		if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state[cpu]) {
			cpuhp_reset_state(st, prev_state[cpu]);
			__cpuhp_kick_ap(st);
		}
	}

	cpumask_clear(&cpu_fastoff_mask);

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	return ret;
}
int cpus_down(struct cpumask cpus)
{
	int err = 0;
	int cpu;

	trace_cpus_down_enter(cpumask_first(&cpus));
	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	for_each_cpu(cpu, &cpus)
		if (!cpu_online(cpu)) {
			cpumask_clear_cpu(cpu, &cpus);
			pr_warn("cpus_down: cpu%d is not online\n", cpu);
		}

	err = _cpus_down(cpus, 0, CPUHP_OFFLINE);

out:
	cpu_maps_update_done();
	trace_cpus_down_exit(cpumask_first(&cpus));
	return err;
}
EXPORT_SYMBOL_GPL(cpus_down);
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpus_up(struct cpumask cpus, int tasks_frozen,
			  enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st;
	cpumask_t ap_work_cpus = CPU_MASK_NONE;
	cpumask_t bringup_cpus = CPU_MASK_NONE;
	int prev_state[8] = {0};
	struct task_struct *idle;
	int ret = 0;
	int cpu;

	cpus_write_lock();

	for_each_cpu(cpu, &cpus)
		if (!cpu_present(cpu)) {
			pr_warn("_cpus_up: cpu%d is not present\n", cpu);
			cpumask_clear_cpu(cpu, &cpus);
		}

	cpumask_copy(&cpu_faston_mask, &cpus);

	for_each_cpu(cpu, &cpu_faston_mask) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		/*
		 * The caller of do_cpu_up might have raced with another
		 * caller. Ignore it for now.
		 */
		if (st->state >= target)
			continue;

		if (st->state == CPUHP_OFFLINE) {
			/* Let it fail before we try to bring the cpu up */
			idle = idle_thread_get(cpu);
			if (IS_ERR(idle)) {
				ret = PTR_ERR(idle);
				goto out;
			}
		}

		prev_state[cpu] = cpuhp_set_state(st, target);

		if (st->state > CPUHP_BRINGUP_CPU)
			cpumask_set_cpu(cpu, &ap_work_cpus);
		else
			cpumask_set_cpu(cpu, &bringup_cpus);
	}

	cpuhp_tasks_frozen = tasks_frozen;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	for_each_cpu(cpu, &ap_work_cpus)
		cpuhp_fast_kick_ap_work_pre(cpu);

	for_each_cpu(cpu, &ap_work_cpus)
		cpuhp_fast_kick_ap_work_post(cpu, prev_state[cpu]);

	/* Bringup of all cpus failed */
	if (cpumask_empty(&bringup_cpus))
		goto out;

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	for_each_cpu(cpu, &bringup_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		ret = cpuhp_up_callbacks(cpu, st, target);
		if (ret)
			panic("%s: failed to bringup_cpus\n", __func__);
	}

out:
	cpumask_clear(&cpu_faston_mask);
	cpus_write_unlock();

	return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
int cpus_up(struct cpumask cpus)
{
	int err = 0;
	int cpu;

	trace_cpus_up_enter(cpumask_first(&cpus));

	for_each_cpu(cpu, &cpus)
		if (cpu_online(cpu)) {
			cpumask_clear_cpu(cpu, &cpus);
			pr_warn("cpus_up: cpu%d is already online\n", cpu);
		}

	for_each_cpu(cpu, &cpus) {
		err = try_online_node(cpu_to_node(cpu));
		if (err)
			return err;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpus_up(cpus, 0, CPUHP_ONLINE);
out:
	cpu_maps_update_done();
	trace_cpus_up_exit(cpumask_first(&cpus));

	return err;
}
EXPORT_SYMBOL_GPL(cpus_up);
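
/*
 * Illustrative sketch of the vendor mask-based helpers above: cpus_down()
 * and cpus_up() take a struct cpumask by value and (un)plug every CPU in it
 * in one operation. The caller below is hypothetical (it assumes CPUs 4-5
 * form a cluster) and is guarded out.
 */
#if 0
static int example_toggle_cluster(void)
{
	struct cpumask mask;
	int ret;

	cpumask_clear(&mask);
	cpumask_set_cpu(4, &mask);
	cpumask_set_cpu(5, &mask);

	ret = cpus_down(mask);		/* hotplug both CPUs out together */
	if (ret)
		return ret;

	return cpus_up(mask);		/* and bring them back */
}
#endif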
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;
	struct device *cpu_device;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			cpu_device = get_cpu_device(cpu);
			if (!cpu_device)
				pr_err("%s: failed to get cpu%d device\n",
				       __func__, cpu);
			else
				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

struct cpumask cpu_fastoff_mask;
EXPORT_SYMBOL(cpu_fastoff_mask);
struct cpumask cpu_faston_mask;
EXPORT_SYMBOL(cpu_faston_mask);

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	cpumask_clear(&cpu_fastoff_mask);
	cpumask_clear(&cpu_faston_mask);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:dead",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/* Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free for dynamic slot assignment of the Online state. The states
 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
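
/*
 * Illustrative sketch only: a typical multi-instance user embeds a
 * struct hlist_node in its per-device data, sets the state up once with
 * cpuhp_setup_state_multi() (wrapper from <linux/cpuhotplug.h>) and then
 * adds each device through cpuhp_state_add_instance(), which ends up in
 * __cpuhp_state_add_instance() above. All example_* names are hypothetical
 * and the block is guarded out.
 */
#if 0
static enum cpuhp_state example_hp_state;	/* dynamic state number */

struct example_dev {
	struct hlist_node node;
	/* per-device state */
};

static int example_online(unsigned int cpu, struct hlist_node *node)
{
	struct example_dev *edev = hlist_entry(node, struct example_dev, node);

	/* prepare @edev for @cpu going online */
	return 0;
}

static int __init example_setup(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:online",
				      example_online, NULL);
	if (ret < 0)
		return ret;
	example_hp_state = ret;
	return 0;
}

static int example_register(struct example_dev *edev)
{
	return cpuhp_state_add_instance(example_hp_state, &edev->node);
}
#endif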
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
 * @state:		The state to setup
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
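
/*
 * Illustrative sketch only: the common way to consume this interface is the
 * cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>, which takes the cpus
 * read lock around __cpuhp_setup_state(). With CPUHP_AP_ONLINE_DYN the
 * allocated dynamic state number is returned. The callbacks are hypothetical
 * and the block is guarded out.
 */
#if 0
static int example_cpu_online(unsigned int cpu)
{
	/* set up per-cpu resources for @cpu */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* tear down per-cpu resources for @cpu */
	return 0;
}

static int __init example_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/driver:online",
				example_cpu_online, example_cpu_offline);
	return ret < 0 ? ret : 0;
}
#endif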
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}
static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);


static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
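
/*
 * Illustrative sketch only: cpumask_of() resolves to an address inside
 * cpu_bit_bitmap[], so callers get a constant single-bit mask without
 * allocating one. The helper below is hypothetical and guarded out.
 */
#if 0
static void example_queue_on_cpu(struct work_struct *work, unsigned int cpu)
{
	/* constant mask with only @cpu set, backed by cpu_bit_bitmap[] */
	const struct cpumask *mask = cpumask_of(cpu);

	WARN_ON(cpumask_weight(mask) != 1);
	queue_work_on(cpumask_first(mask), system_wq, work);
}
#endif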
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}