/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/exynos-ss.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <linux/nmi.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
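
/*
 * Illustration (assuming the usual preempt_count layout, where
 * SOFTIRQ_OFFSET is 1 << 8): inside one local_bh_disable() section,
 * softirq_count() reads 0x200; while actually running a softirq handler
 * it reads 0x100, which is the bit that in_serving_softirq() tests for.
 */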
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
#endif
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
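
/*
 * For example, with HZ=1000 the 2 ms budget comes to two jiffies, while
 * with HZ=100 it rounds up to a single jiffy and the restart counter is
 * usually what bounds the loop.
 */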
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC, as the current task context is borrowed for
	 * the softirq. A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

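	/*
	 * For illustration: the ffs() walk below serves pending bits
	 * lowest-first. E.g. if pending is 0x09 (HI and NET_RX), ffs()
	 * returns 1 and HI runs; after the shift pending is 0x04, ffs()
	 * returns 3 and NET_RX runs.
	 */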
	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		exynos_ss_irq(ESS_FLAG_SOFTIRQ, h->action, irqs_disabled(), ESS_FLAG_IN);
		sl_softirq_entry(softirq_to_name[vec_nr], h->action);
		h->action(h);
		sl_softirq_exit();
		exynos_ss_irq(ESS_FLAG_SOFTIRQ, h->action, irqs_disabled(), ESS_FLAG_OUT);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}
static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
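
/*
 * Example usage (hypothetical - FOO_SOFTIRQ and foo_action are
 * illustrative names only; a new softirq also needs an entry in the
 * softirq enum and in softirq_to_name[]):
 *
 *	static void foo_action(struct softirq_action *h)
 *	{
 *		... runs in softirq context with hardirqs enabled ...
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_action);	   (once, at init time)
 *	raise_softirq(FOO_SOFTIRQ);		   (from the irq handler)
 */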
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				exynos_ss_irq(ESS_FLAG_SOFTIRQ_TASKLET,
						t->func, irqs_disabled(), ESS_FLAG_IN);
				sl_softirq_entry(softirq_to_name[TASKLET_SOFTIRQ], t->func);
				t->func(t->data);
				sl_softirq_exit();
				exynos_ss_irq(ESS_FLAG_SOFTIRQ_TASKLET,
						t->func, irqs_disabled(), ESS_FLAG_OUT);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
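
/*
 * Note on the loop above: a tasklet whose trylock fails (it is running
 * on another CPU) or whose disable count is non-zero is simply re-queued
 * and the softirq is raised again, which is what guarantees that a given
 * tasklet never runs on two CPUs at once.
 */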
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				exynos_ss_irq(ESS_FLAG_SOFTIRQ_HI_TASKLET,
						t->func, irqs_disabled(), ESS_FLAG_IN);
				sl_softirq_entry(softirq_to_name[HI_SOFTIRQ], t->func);
				t->func(t->data);
				sl_softirq_exit();
				exynos_ss_irq(ESS_FLAG_SOFTIRQ_HI_TASKLET,
						t->func, irqs_disabled(), ESS_FLAG_OUT);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
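
/*
 * Example usage (hypothetical - struct foo_dev and foo_tasklet_fn are
 * illustrative names only):
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo_dev *foo = (struct foo_dev *)data;
 *		...
 *	}
 *
 *	tasklet_init(&foo->tasklet, foo_tasklet_fn, (unsigned long)foo);
 *	tasklet_schedule(&foo->tasklet);	(from the irq handler)
 *	tasklet_kill(&foo->tasklet);		(before freeing foo)
 */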
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
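
/*
 * Example usage (hypothetical names): arm the embedded hrtimer as usual;
 * the callback then runs from HI_SOFTIRQ context instead of hardirq:
 *
 *	tasklet_hrtimer_init(&foo->ttimer, foo_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_start(&foo->ttimer.timer, ms_to_ktime(10),
 *		      HRTIMER_MODE_REL);
 */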
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack here, as we
		 * are not deep in the task stack.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};
static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}