/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *      Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/mt_sched_mon.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something of a weak cpu binding,
     though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
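/*
 * Illustrative sketch (not part of the original file) of the serialization
 * rule above: a handler that needs cross-CPU ordering must take its own
 * lock, because the softirq core imposes none. The names below are
 * hypothetical.
 *
 *      static DEFINE_SPINLOCK(example_lock);
 *
 *      static void example_action(struct softirq_action *h)
 *      {
 *              spin_lock(&example_lock);
 *              ...touch state shared between CPUs...
 *              spin_unlock(&example_lock);
 *      }
 */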
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
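/*
 * Illustrative sketch (not part of the original file): the two offsets
 * above are what in_softirq() and in_serving_softirq() test, so callers
 * can tell "bh disabled" apart from "softirq handler running".
 *
 *      local_bh_disable();
 *      WARN_ON(!in_softirq());         softirq_count() is non-zero here,
 *      WARN_ON(in_serving_softirq());  but no handler is actually running
 *      local_bh_enable();
 */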
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += cnt;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        add_preempt_count(cnt);
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0),
                                SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(local_bh_disable);
static void __local_bh_enable(unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == cnt)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);
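/*
 * Illustrative usage sketch (not part of the original file): the usual
 * reason to call the pair exported above is to protect data that a softirq
 * handler on this CPU also touches.
 *
 *      local_bh_disable();
 *      ...update data also modified from softirq context...
 *      local_bh_enable();      may run pending softirqs before returning
 */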
void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        int cpu;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;

        /*
         * Mask out PF_MEMALLOC as the current task context is borrowed for
         * the softirq. A softirq handler such as network RX might set
         * PF_MEMALLOC again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0),
                                SOFTIRQ_OFFSET);
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();

                        kstat_incr_softirqs_this_cpu(vec_nr);

                        trace_softirq_entry(vec_nr);
                        mt_trace_SoftIRQ_start(vec_nr);
                        h->action(h);
                        mt_trace_SoftIRQ_end(vec_nr);
                        trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %u %s %p "
                                       "with preempt_count %08x, "
                                       "exited with %08x?\n", vec_nr,
                                       softirq_to_name[vec_nr], h->action,
                                       prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_exit();

        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_check_idle(cpu);
                _local_bh_enable();
        }

        __irq_enter();
}
static inline void invoke_softirq(void)
{
        if (!force_irqthreads) {
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage. But we have no way to know if the arch
                 * calls irq_exit() on the irq stack. So call softirq
                 * in its own stack to prevent from any overrun on top
                 * of a potentially deep task stack.
                 */
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        WARN_ON_ONCE(!irqs_disabled());
#endif

        account_irq_exit_time(current);
        trace_hardirq_exit();
        sub_preempt_count(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
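/*
 * Illustrative usage sketch (not part of the original file): the typical
 * pairing of the two functions above. A subsystem registers its action
 * once at init time and later marks the vector pending, usually from
 * interrupt context; the handler then runs from __do_softirq() or
 * ksoftirqd. The EXAMPLE_SOFTIRQ slot and example_action handler are
 * hypothetical; real users own one of the fixed NR_SOFTIRQS slots.
 *
 *      open_softirq(EXAMPLE_SOFTIRQ, example_action);
 *      ...
 *      raise_softirq(EXAMPLE_SOFTIRQ);
 */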
/*
 * Tasklets
 */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                mt_trace_tasklet_start(t->func);
                                t->func(t->data);
                                mt_trace_tasklet_end(t->func);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}
static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
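/*
 * Illustrative usage sketch (not part of the original file): a driver
 * typically initializes a tasklet once, schedules it from its interrupt
 * handler, and kills it on teardown; the function runs later in softirq
 * context. The mydrv names are hypothetical.
 *
 *      static void mydrv_tasklet_fn(unsigned long data)
 *      {
 *              struct mydrv *drv = (struct mydrv *)data;
 *              ...bottom-half work for drv...
 *      }
 *
 *      tasklet_init(&drv->tasklet, mydrv_tasklet_fn, (unsigned long)drv);
 *      ...
 *      tasklet_schedule(&drv->tasklet);        from the hard irq handler
 *      ...
 *      tasklet_kill(&drv->tasklet);            on teardown
 */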
/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
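/*
 * Illustrative usage sketch (not part of the original file): a
 * tasklet_hrtimer lets a callback written against hrtimer semantics run
 * from HI_SOFTIRQ softirq context instead of hard irq context. The mydrv
 * names are hypothetical.
 *
 *      static enum hrtimer_restart mydrv_timer_fn(struct hrtimer *t)
 *      {
 *              ...softirq-context work...
 *              return HRTIMER_NORESTART;
 *      }
 *
 *      tasklet_hrtimer_init(&drv->thr, mydrv_timer_fn,
 *                           CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *      tasklet_hrtimer_start(&drv->thr, ktime_set(0, 10 * NSEC_PER_MSEC),
 *                            HRTIMER_MODE_REL);
 */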
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty. */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = *(int *)cp->info;
        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = &softirq;
                cp->flags = 0;

                __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call  = remote_softirq_cpu_notify,
};
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                __do_softirq();
                rcu_note_context_switch(cpu);
                local_irq_enable();
                cond_resched();
                return;
        }
        local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                takeover_tasklets((unsigned long)hcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};
static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        register_cpu_notifier(&cpu_nfb);

        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);
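/*
 * Illustrative sketch (not part of the original file): the smpboot helper
 * used above creates and manages one thread per possible CPU; the client
 * only supplies the per-cpu storage and callbacks. A hypothetical second
 * user would follow the same pattern:
 *
 *      static DEFINE_PER_CPU(struct task_struct *, example_thread);
 *
 *      static struct smp_hotplug_thread example_threads = {
 *              .store                  = &example_thread,
 *              .thread_should_run      = example_should_run,
 *              .thread_fn              = example_run,
 *              .thread_comm            = "example/%u",
 *      };
 *
 *      BUG_ON(smpboot_register_percpu_thread(&example_threads));
 */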
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)