/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/cpuset.h>

#include <soc/samsung/exynos-emc.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail.
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
#ifdef CONFIG_SMP
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	complete(done);
}
/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void cpus_read_lock(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
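
/*
 * Example (illustrative sketch only, not part of this file; the function
 * name is hypothetical): any code that needs a stable cpu_online_mask
 * brackets its work with cpus_read_lock()/cpus_read_unlock(), which holds
 * off a concurrent hotplug writer until the reader section is done.
 *
 *	static void walk_online_cpus(void)
 *	{
 *		unsigned int cpu;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			pr_info("cpu%u is online\n", cpu);
 *		cpus_read_unlock();
 *	}
 */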
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpus_write_lock(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpus_write_unlock(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */
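
/*
 * Example (illustrative sketch only): every cpu_hotplug_disable() must be
 * balanced by a cpu_hotplug_enable(), otherwise the WARN_ONCE() in
 * __cpu_hotplug_enable() trips and the unbalanced enable is ignored.
 *
 *	cpu_hotplug_disable();
 *	// ... work that must not race with cpu_up()/cpu_down() ...
 *	cpu_hotplug_enable();
 */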
/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->state < prev_state)
			st->state++;
		else
			st->state--;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}
/* Notifier wrappers for transitioning to state machine */

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in cpu_notify_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}
/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
	}
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	if (WARN_ON_ONCE(!st->should_run))
		return;

	lock_map_acquire(&cpuhp_state_lock_map);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);
		if (step->skip_onerr)
			goto next;
	}

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

next:
	lock_map_release(&cpuhp_state_lock_map);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}
static int cpuhp_fast_kick_ap_work_pre(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	trace_cpuhp_enter(cpu, st->target, prev_state,
			  cpuhp_fast_kick_ap_work_pre);

	cpuhp_set_state(st, st->target);
	if (!st->single && st->state == st->target)
		return 0;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);

	return 0;
}

static int cpuhp_fast_kick_ap_work_post(unsigned int cpu,
					enum cpuhp_state prev_state)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	wait_for_ap_thread(st, st->bringup);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu);
static int takedown_cpus(const struct cpumask *down_cpus)
{
	struct cpuhp_cpu_state *st;
	int err, cpu;

	/* Park the smpboot threads */
	for_each_cpu(cpu, down_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		trace_cpuhp_enter(cpu, st->target, st->state, takedown_cpu);

		kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		smpboot_park_threads(cpu);
	}

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, down_cpus);
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		for_each_cpu(cpu, down_cpus) {
			st = per_cpu_ptr(&cpuhp_state, cpu);
			st->target = st->state;

			/* Unpark the hotplug thread so we can rollback there */
			kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		}
		return err;
	}

	for_each_cpu(cpu, down_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		BUG_ON(cpu_online(cpu));

		/*
		 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
		 * runnable tasks from the cpu, there's only the idle task left now
		 * that the migration thread is done doing the stop_machine thing.
		 *
		 * Wait for the stop thread to go away.
		 */
		wait_for_ap_thread(st, false);
		BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
	}

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	for_each_cpu(cpu, down_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);

		hotplug_cpu__broadcast_tick_pull(cpu);
		/* This actually kills the CPU. */
		__cpu_die(cpu);

		tick_cleanup_dead_cpu(cpu);
		rcutree_migrate_callbacks(cpu);

		trace_cpuhp_exit(cpu, st->state, st->state, st->result);
	}

	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			if (st->state < prev_state)
				undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
static int __ref _cpus_down(struct cpumask cpus, int tasks_frozen,
			    enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st;
	cpumask_t ap_work_cpus = CPU_MASK_NONE;
	cpumask_t take_down_cpus = CPU_MASK_NONE;
	int prev_state[8] = {0};
	int ret = 0;
	int cpu;

	if (num_online_cpus() == 1)
		return -EBUSY;

	for_each_cpu(cpu, &cpus)
		if (!cpu_present(cpu))
			return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	cpumask_copy(&cpu_fastoff_mask, &cpus);
	for_each_cpu(cpu, &cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		prev_state[cpu] = cpuhp_set_state(st, target);
		if (st->state > CPUHP_TEARDOWN_CPU)
			cpumask_set_cpu(cpu, &ap_work_cpus);
		else
			cpumask_set_cpu(cpu, &take_down_cpus);
	}

	for_each_cpu(cpu, &ap_work_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		emc_cpu_pre_off_callback(cpu);
		set_cpu_active(cpu, false);
		st->state = CPUHP_AP_EXYNOS_IDLE_CTRL;
	}

	cpuset_update_active_cpus();

	for_each_cpu(cpu, &ap_work_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		cpuhp_fast_kick_ap_work_pre(cpu);
	}

	for_each_cpu(cpu, &ap_work_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		cpuhp_fast_kick_ap_work_post(cpu, prev_state[cpu]);
		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		st->target = target;
		cpumask_set_cpu(cpu, &take_down_cpus);
	}

	/* Hotplug out of all cpus failed */
	if (cpumask_empty(&take_down_cpus))
		goto out;

	ret = takedown_cpus(&take_down_cpus);
	if (ret)
		panic("%s: failed to takedown_cpus\n", __func__);

	for_each_cpu(cpu, &take_down_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		ret = cpuhp_down_callbacks(cpu, st, target);
		if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state[cpu]) {
			cpuhp_reset_state(st, prev_state[cpu]);
			__cpuhp_kick_ap(st);
		}
	}

out:
	cpumask_clear(&cpu_fastoff_mask);
	cpus_write_unlock();

	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();

	return ret;
}
int cpus_down(struct cpumask cpus)
{
	int err, cpu;

	trace_cpus_down_enter(cpumask_first(&cpus));
	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	for_each_cpu(cpu, &cpus)
		if (!cpu_online(cpu)) {
			cpumask_clear_cpu(cpu, &cpus);
			pr_warn("cpus_down: cpu%d is not online\n", cpu);
		}

	err = _cpus_down(cpus, 0, CPUHP_OFFLINE);

out:
	cpu_maps_update_done();
	trace_cpus_down_exit(cpumask_first(&cpus));
	return err;
}
EXPORT_SYMBOL_GPL(cpus_down);
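
/*
 * Example (illustrative sketch of this vendor-specific bulk API; the CPU
 * numbers are hypothetical): take two CPUs down and bring them back in one
 * shot. cpus_down()/cpus_up() take the cpumask by value and silently drop
 * CPUs that are not online (resp. already online) from the request.
 *
 *	struct cpumask mask;
 *
 *	cpumask_clear(&mask);
 *	cpumask_set_cpu(2, &mask);
 *	cpumask_set_cpu(3, &mask);
 *	cpus_down(mask);
 *	// ... later ...
 *	cpus_up(mask);
 */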
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpus_up(struct cpumask cpus, int tasks_frozen,
			  enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st;
	cpumask_t ap_work_cpus = CPU_MASK_NONE;
	cpumask_t bringup_cpus = CPU_MASK_NONE;
	int prev_state[8] = {0};
	struct task_struct *idle;
	int ret = 0;
	int cpu;

	cpus_write_lock();

	for_each_cpu(cpu, &cpus)
		if (!cpu_present(cpu)) {
			pr_warn("_cpus_up: cpu%d is not present\n", cpu);
			cpumask_clear_cpu(cpu, &cpus);
		}

	cpumask_copy(&cpu_faston_mask, &cpus);

	for_each_cpu(cpu, &cpu_faston_mask) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		/*
		 * The caller of do_cpu_up might have raced with another
		 * caller. Ignore it for now.
		 */
		if (st->state >= target)
			continue;

		if (st->state == CPUHP_OFFLINE) {
			/* Let it fail before we try to bring the cpu up */
			idle = idle_thread_get(cpu);
			if (IS_ERR(idle)) {
				ret = PTR_ERR(idle);
				goto out;
			}
		}

		prev_state[cpu] = cpuhp_set_state(st, target);

		if (st->state > CPUHP_BRINGUP_CPU)
			cpumask_set_cpu(cpu, &ap_work_cpus);
		else
			cpumask_set_cpu(cpu, &bringup_cpus);
	}

	cpuhp_tasks_frozen = tasks_frozen;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	for_each_cpu(cpu, &ap_work_cpus)
		cpuhp_fast_kick_ap_work_pre(cpu);

	for_each_cpu(cpu, &ap_work_cpus)
		cpuhp_fast_kick_ap_work_post(cpu, prev_state[cpu]);

	/* Bringup of all cpus failed */
	if (cpumask_empty(&bringup_cpus))
		goto out;

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	for_each_cpu(cpu, &bringup_cpus) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		ret = cpuhp_up_callbacks(cpu, st, target);
		if (ret)
			panic("%s: failed to bringup_cpus\n", __func__);
	}

out:
	cpumask_clear(&cpu_faston_mask);
	cpus_write_unlock();

	return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
int cpus_up(struct cpumask cpus)
{
	int err, cpu;

	trace_cpus_up_enter(cpumask_first(&cpus));
	for_each_cpu(cpu, &cpus)
		if (cpu_online(cpu)) {
			cpumask_clear_cpu(cpu, &cpus);
			pr_warn("cpus_up: cpu%d is already online\n", cpu);
		}

	for_each_cpu(cpu, &cpus) {
		err = try_online_node(cpu_to_node(cpu));
		if (err)
			return err;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpus_up(cpus, 0, CPUHP_ONLINE);
out:
	cpu_maps_update_done();
	trace_cpus_up_exit(cpumask_first(&cpus));

	return err;
}
EXPORT_SYMBOL_GPL(cpus_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;
	struct device *cpu_device;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			cpu_device = get_cpu_device(cpu);
			if (!cpu_device)
				pr_err("%s: failed to get cpu%d device\n",
				       __func__, cpu);
			else
				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

struct cpumask cpu_fastoff_mask;
EXPORT_SYMBOL(cpu_fastoff_mask);
struct cpumask cpu_faston_mask;
EXPORT_SYMBOL(cpu_faston_mask);

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	cpumask_clear(&cpu_fastoff_mask);
	cpumask_clear(&cpu_faston_mask);

	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS]= {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:dead",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/* Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}
/*
 * Returns a free for dynamic slot assignment of the Online state. The states
 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
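
/*
 * Example (illustrative sketch, hypothetical driver types and names): a
 * multi-instance state embeds a struct hlist_node in each instance and
 * registers it with the cpuhp_state_add_instance() wrapper from
 * <linux/cpuhotplug.h>. The node pointer is handed back to the multi
 * callbacks, which recover the instance with hlist_entry():
 *
 *	struct my_instance {
 *		struct hlist_node node;
 *		// ... per-device data ...
 *	};
 *
 *	static int my_startup(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_instance *inst =
 *			hlist_entry(node, struct my_instance, node);
 *		// per-cpu setup for this instance
 *		return 0;
 *	}
 *
 *	ret = cpuhp_state_add_instance(my_state, &inst->node);
 */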
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
 * @state:		The state to setup
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
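
/*
 * Example (illustrative sketch, hypothetical subsystem names): registering
 * a dynamic online state via the cpuhp_setup_state() wrapper from
 * <linux/cpuhotplug.h>. With CPUHP_AP_ONLINE_DYN the positive return value
 * is the allocated state, which must be kept for later removal:
 *
 *	static enum cpuhp_state my_state;
 *
 *	static int my_online(unsigned int cpu)  { return 0; }
 *	static int my_offline(unsigned int cpu) { return 0; }
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/my:online",
 *				my_online, my_offline);
 *	if (ret < 0)
 *		return ret;
 *	my_state = ret;
 *	// ... on module exit:
 *	cpuhp_remove_state(my_state);
 */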
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret = 0;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};
#ifdef CONFIG_HOTPLUG_SMT

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
}

static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret) {
		cpu_smt_control = ctrlval;
		arch_smt_update();
	}
	cpu_maps_update_done();
	return ret;
}

static int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	arch_smt_update();
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}

static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);

static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool active = topology_max_smt_threads() > 1;

	return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
};
static int __init cpu_smt_state_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

#else
static inline int cpu_smt_state_init(void) { return 0; }
#endif

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_state_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
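
/*
 * Example (illustrative): cpumask_of(cpu) resolves to a constant pointer
 * into cpu_bit_bitmap[] - row (cpu % BITS_PER_LONG) + 1, backed up by
 * cpu / BITS_PER_LONG words - so the word covering @cpu reads as
 * 1UL << (cpu % BITS_PER_LONG) and no mask has to be built at runtime:
 *
 *	// pass a single-CPU mask without allocating one
 *	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
 */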
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
#ifdef CONFIG_SMP
int __boot_cpu_id;
#endif

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	this_cpu_write(cpuhp_state.booted_once, true);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}