/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#include <linux/mt_sched_mon.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
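/*
 * Illustrative note (not from the original file): with this encoding,
 * in_softirq() (softirq_count() != 0) is true both while a softirq handler
 * runs and while bh is merely disabled, whereas in_serving_softirq()
 * (softirq_count() & SOFTIRQ_OFFSET) is true only while a handler is
 * actually executing.
 */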
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += cnt;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        add_preempt_count(cnt);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0),
                                SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);
static void __local_bh_enable(unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == cnt)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(cnt);
}
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
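/*
 * Illustrative arithmetic (not from the original file): msecs_to_jiffies()
 * rounds up, so the 2 ms budget is HZ-dependent; e.g. with HZ=1000 it is
 * 2 jiffies, while with HZ=100 (10 ms per tick) it rounds up to 1 jiffy.
 */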
asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        int cpu;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;

        /*
         * Mask out PF_MEMALLOC as current task context is borrowed for the
         * softirq. A softirq handler such as network RX might set PF_MEMALLOC
         * again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;
        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0),
                                SOFTIRQ_OFFSET);
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;
        do {
                if (pending & 1) {
                        unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();

                        kstat_incr_softirqs_this_cpu(vec_nr);

                        trace_softirq_entry(vec_nr);
                        mt_trace_SoftIRQ_start(vec_nr);
                        h->action(h);
                        mt_trace_SoftIRQ_end(vec_nr);
                        trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %u %s %p "
                                       "with preempt_count %08x,"
                                       " exited with %08x?\n", vec_nr,
                                       softirq_to_name[vec_nr], h->action,
                                       prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();
        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_exit();

        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_check_idle(cpu);
                _local_bh_enable();
        }

        __irq_enter();
}
static inline void invoke_softirq(void)
{
        if (!force_irqthreads) {
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage. But we have no way to know if the arch
                 * calls irq_exit() on the irq stack. So call softirq
                 * in its own stack to prevent from any overrun on top
                 * of a potentially deep task stack.
                 */
                do_softirq();
        } else {
                wakeup_softirqd();
        }
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        WARN_ON_ONCE(!irqs_disabled());
#endif

        account_irq_exit_time(current);
        trace_hardirq_exit();
        sub_preempt_count(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
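/*
 * Usage sketch (illustrative only): softirq slots are a fixed enum in
 * <linux/interrupt.h>, so open_softirq() is called by core code, e.g.
 * softirq_init() below wiring up the tasklet handlers:
 *
 *      open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *      raise_softirq(TASKLET_SOFTIRQ); // mark it pending on this cpu; it
 *                                      // runs at irq exit or in ksoftirqd
 *
 * Drivers normally use tasklets rather than claiming a softirq slot.
 */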
/*
 * Tasklets
 */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                mt_trace_tasklet_start(t->func);
                                t->func(t->data);
                                mt_trace_tasklet_end(t->func);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}
static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on inline stack, as we are not deep
                 * in the task stack here.
                 */
                __do_softirq();
                rcu_note_context_switch(cpu);
                local_irq_enable();
                cond_resched();
                return;
        }
        local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                takeover_tasklets((unsigned long)hcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};
static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};
static __init int spawn_ksoftirqd(void)
{
        register_cpu_notifier(&cpu_nfb);

        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}
#endif