Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/softirq.c | |
3 | * | |
4 | * Copyright (C) 1992 Linus Torvalds | |
5 | * | |
b10db7f0 PM |
6 | * Distribute under GPLv2. |
7 | * | |
8 | * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) | |
54514a70 DM |
9 | * |
10 | * Remote softirq infrastructure is by Jens Axboe. | |
1da177e4 LT |
11 | */ |
12 | ||
9984de1a | 13 | #include <linux/export.h> |
1da177e4 LT |
14 | #include <linux/kernel_stat.h> |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/init.h> | |
17 | #include <linux/mm.h> | |
18 | #include <linux/notifier.h> | |
19 | #include <linux/percpu.h> | |
20 | #include <linux/cpu.h> | |
83144186 | 21 | #include <linux/freezer.h> |
1da177e4 LT |
22 | #include <linux/kthread.h> |
23 | #include <linux/rcupdate.h> | |
7e49fcce | 24 | #include <linux/ftrace.h> |
78eef01b | 25 | #include <linux/smp.h> |
3e339b5d | 26 | #include <linux/smpboot.h> |
79bf2bb3 | 27 | #include <linux/tick.h> |
a0e39ed3 | 28 | |
6fa3eb70 | 29 | #include <linux/mt_sched_mon.h> |
a0e39ed3 | 30 | #define CREATE_TRACE_POINTS |
ad8d75ff | 31 | #include <trace/events/irq.h> |
1da177e4 LT |
32 | |
33 | #include <asm/irq.h> | |
34 | /* | |
35 | - No shared variables, all the data are CPU local. | |
36 | - If a softirq needs serialization, let it serialize itself | |
37 | by its own spinlocks. | |
38 | - Even if softirq is serialized, only local cpu is marked for | |
39 | execution. Hence, we get something sort of weak cpu binding. | |
40 | Though it is still not clear whether it will result in better |
41 | locality or not. |
42 | ||
43 | Examples: | |
44 | - NET RX softirq. It is multithreaded and does not require | |
45 | any global serialization. | |
46 | - NET TX softirq. It kicks software netdevice queues, hence | |
47 | it is logically serialized per device, but this serialization | |
48 | is invisible to common code. | |
49 | - Tasklets: serialized wrt itself. | |
50 | */ | |
51 | ||
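As a concrete illustration of the self-serialization rule in the comment above, here is a minimal sketch; the handler name, lock, and counter are hypothetical and not part of this file. Because the same vector may run concurrently on other CPUs, a handler that touches shared state must take its own lock:

```c
#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Hypothetical shared state; the softirq core gives no cross-CPU locking. */
static DEFINE_SPINLOCK(example_lock);
static unsigned long example_events;

static void example_serialized_action(struct softirq_action *h)
{
	/*
	 * The same vector can be executing on another CPU right now,
	 * so the handler serializes itself with its own spinlock.
	 */
	spin_lock(&example_lock);
	example_events++;
	spin_unlock(&example_lock);
}
```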
52 | #ifndef __ARCH_IRQ_STAT | |
53 | irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; | |
54 | EXPORT_SYMBOL(irq_stat); | |
55 | #endif | |
56 | ||
978b0116 | 57 | static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; |
1da177e4 | 58 | |
4dd53d89 | 59 | DEFINE_PER_CPU(struct task_struct *, ksoftirqd); |
1da177e4 | 60 | |
5d592b44 | 61 | char *softirq_to_name[NR_SOFTIRQS] = { |
5dd4de58 | 62 | "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", |
09223371 | 63 | "TASKLET", "SCHED", "HRTIMER", "RCU" |
5d592b44 JB |
64 | }; |
65 | ||
1da177e4 LT |
66 | /* |
67 | * we cannot loop indefinitely here to avoid userspace starvation, | |
68 | * but we also don't want to introduce a worst case 1/HZ latency | |
69 | * to the pending events, so let the scheduler balance |
70 | * the softirq load for us. | |
71 | */ | |
676cb02d | 72 | static void wakeup_softirqd(void) |
1da177e4 LT |
73 | { |
74 | /* Interrupts are disabled: no need to stop preemption */ | |
909ea964 | 75 | struct task_struct *tsk = __this_cpu_read(ksoftirqd); |
1da177e4 LT |
76 | |
77 | if (tsk && tsk->state != TASK_RUNNING) | |
78 | wake_up_process(tsk); | |
79 | } | |
80 | ||
75e1056f VP |
81 | /* |
82 | * preempt_count and SOFTIRQ_OFFSET usage: | |
83 | * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving | |
84 | * softirq processing. | |
85 | * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) | |
86 | * on local_bh_disable or local_bh_enable. | |
87 | * This lets us distinguish between whether we are currently processing | |
88 | * softirq and whether we just have bh disabled. | |
89 | */ | |
90 | ||
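A rough sketch of how the two offsets are told apart; the wrapper names are illustrative, and they mirror the in_softirq()/in_serving_softirq() helpers in <linux/hardirq.h>:

```c
#include <linux/hardirq.h>

/* Any softirq-related offset present: bh disabled or serving a softirq. */
static inline bool example_in_softirq(void)
{
	return softirq_count() != 0;
}

/* SOFTIRQ_OFFSET itself is only added while __do_softirq() runs. */
static inline bool example_serving_softirq(void)
{
	return softirq_count() & SOFTIRQ_OFFSET;
}
```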
de30a2b3 IM |
91 | /* |
92 | * This one is for softirq.c-internal use, | |
93 | * where hardirqs are disabled legitimately: | |
94 | */ | |
3c829c36 | 95 | #ifdef CONFIG_TRACE_IRQFLAGS |
75e1056f | 96 | static void __local_bh_disable(unsigned long ip, unsigned int cnt) |
de30a2b3 IM |
97 | { |
98 | unsigned long flags; | |
99 | ||
100 | WARN_ON_ONCE(in_irq()); | |
101 | ||
102 | raw_local_irq_save(flags); | |
7e49fcce SR |
103 | /* |
104 | * The preempt tracer hooks into add_preempt_count and will break | |
105 | * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET | |
106 | * is set and before current->softirq_enabled is cleared. | |
107 | * We must manually increment preempt_count here and manually | |
108 | * call the trace_preempt_off later. | |
109 | */ | |
75e1056f | 110 | preempt_count() += cnt; |
de30a2b3 IM |
111 | /* |
112 | * Were softirqs turned off above: | |
113 | */ | |
75e1056f | 114 | if (softirq_count() == cnt) |
de30a2b3 IM |
115 | trace_softirqs_off(ip); |
116 | raw_local_irq_restore(flags); | |
7e49fcce | 117 | |
75e1056f | 118 | if (preempt_count() == cnt) |
7e49fcce | 119 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
de30a2b3 | 120 | } |
3c829c36 | 121 | #else /* !CONFIG_TRACE_IRQFLAGS */ |
75e1056f | 122 | static inline void __local_bh_disable(unsigned long ip, unsigned int cnt) |
3c829c36 | 123 | { |
75e1056f | 124 | add_preempt_count(cnt); |
3c829c36 TC |
125 | barrier(); |
126 | } | |
127 | #endif /* CONFIG_TRACE_IRQFLAGS */ | |
de30a2b3 IM |
128 | |
129 | void local_bh_disable(void) | |
130 | { | |
75e1056f VP |
131 | __local_bh_disable((unsigned long)__builtin_return_address(0), |
132 | SOFTIRQ_DISABLE_OFFSET); | |
de30a2b3 IM |
133 | } |
134 | ||
135 | EXPORT_SYMBOL(local_bh_disable); | |
136 | ||
75e1056f VP |
137 | static void __local_bh_enable(unsigned int cnt) |
138 | { | |
139 | WARN_ON_ONCE(in_irq()); | |
140 | WARN_ON_ONCE(!irqs_disabled()); | |
141 | ||
142 | if (softirq_count() == cnt) | |
143 | trace_softirqs_on((unsigned long)__builtin_return_address(0)); | |
144 | sub_preempt_count(cnt); | |
145 | } | |
146 | ||
de30a2b3 IM |
147 | /* |
148 | * Special-case - softirqs can safely be enabled in | |
149 | * cond_resched_softirq(), or by __do_softirq(), | |
150 | * without processing still-pending softirqs: | |
151 | */ | |
152 | void _local_bh_enable(void) | |
153 | { | |
75e1056f | 154 | __local_bh_enable(SOFTIRQ_DISABLE_OFFSET); |
de30a2b3 IM |
155 | } |
156 | ||
157 | EXPORT_SYMBOL(_local_bh_enable); | |
158 | ||
0f476b6d | 159 | static inline void _local_bh_enable_ip(unsigned long ip) |
de30a2b3 | 160 | { |
0f476b6d | 161 | WARN_ON_ONCE(in_irq() || irqs_disabled()); |
3c829c36 | 162 | #ifdef CONFIG_TRACE_IRQFLAGS |
0f476b6d | 163 | local_irq_disable(); |
3c829c36 | 164 | #endif |
de30a2b3 IM |
165 | /* |
166 | * Are softirqs going to be turned on now: | |
167 | */ | |
75e1056f | 168 | if (softirq_count() == SOFTIRQ_DISABLE_OFFSET) |
0f476b6d | 169 | trace_softirqs_on(ip); |
de30a2b3 IM |
170 | /* |
171 | * Keep preemption disabled until we are done with | |
172 | * softirq processing: | |
173 | */ | |
75e1056f | 174 | sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1); |
de30a2b3 IM |
175 | |
176 | if (unlikely(!in_interrupt() && local_softirq_pending())) | |
177 | do_softirq(); | |
178 | ||
179 | dec_preempt_count(); | |
3c829c36 | 180 | #ifdef CONFIG_TRACE_IRQFLAGS |
0f476b6d | 181 | local_irq_enable(); |
3c829c36 | 182 | #endif |
de30a2b3 IM |
183 | preempt_check_resched(); |
184 | } | |
0f476b6d JB |
185 | |
186 | void local_bh_enable(void) | |
187 | { | |
188 | _local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | |
189 | } | |
de30a2b3 IM |
190 | EXPORT_SYMBOL(local_bh_enable); |
191 | ||
192 | void local_bh_enable_ip(unsigned long ip) | |
193 | { | |
0f476b6d | 194 | _local_bh_enable_ip(ip); |
de30a2b3 IM |
195 | } |
196 | EXPORT_SYMBOL(local_bh_enable_ip); | |
197 | ||
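A minimal usage sketch for the local_bh_disable()/local_bh_enable() pair above (the per-CPU list and function are hypothetical): disabling bottom halves keeps the local CPU's softirq handlers from running while process context touches data they share.

```c
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU queue also consumed by a softirq handler. */
static DEFINE_PER_CPU(struct list_head, example_queue);

static void example_enqueue(struct list_head *entry)
{
	local_bh_disable();	/* fence off this CPU's softirq handlers */
	list_add_tail(entry, &__get_cpu_var(example_queue));
	local_bh_enable();	/* may immediately run pending softirqs */
}
```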
1da177e4 | 198 | /* |
34376a50 BG |
199 | * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times, |
200 | * but break the loop if need_resched() is set or after 2 ms. | |
201 | * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in | |
202 | * certain cases, such as stop_machine(), jiffies may cease to | |
203 | * increment and so we need the MAX_SOFTIRQ_RESTART limit as | |
204 | * well to make sure we eventually return from this function. |
1da177e4 | 205 | * |
c10d7367 | 206 | * These limits have been established via experimentation. |
1da177e4 LT |
207 | * The two things to balance are latency and fairness - |
208 | * we want to handle softirqs as soon as possible, but they | |
209 | * should not be able to lock up the box. | |
210 | */ | |
c10d7367 | 211 | #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) |
34376a50 | 212 | #define MAX_SOFTIRQ_RESTART 10 |
1da177e4 LT |
213 | |
214 | asmlinkage void __do_softirq(void) | |
215 | { | |
216 | struct softirq_action *h; | |
217 | __u32 pending; | |
c10d7367 | 218 | unsigned long end = jiffies + MAX_SOFTIRQ_TIME; |
1da177e4 | 219 | int cpu; |
907aed48 | 220 | unsigned long old_flags = current->flags; |
34376a50 | 221 | int max_restart = MAX_SOFTIRQ_RESTART; |
907aed48 MG |
222 | |
223 | /* | |
224 | * Mask out PF_MEMALLOC as the current task context is borrowed for the |
225 | * softirq. A softirq handler such as network RX might set PF_MEMALLOC |
226 | * again if the socket is related to swap. |
227 | */ | |
228 | current->flags &= ~PF_MEMALLOC; | |
1da177e4 LT |
229 | |
230 | pending = local_softirq_pending(); | |
6a61671b | 231 | account_irq_enter_time(current); |
829035fd | 232 | |
75e1056f VP |
233 | __local_bh_disable((unsigned long)__builtin_return_address(0), |
234 | SOFTIRQ_OFFSET); | |
d820ac4c | 235 | lockdep_softirq_enter(); |
1da177e4 | 236 | |
1da177e4 LT |
237 | cpu = smp_processor_id(); |
238 | restart: | |
239 | /* Reset the pending bitmask before enabling irqs */ | |
3f74478b | 240 | set_softirq_pending(0); |
1da177e4 | 241 | |
c70f5d66 | 242 | local_irq_enable(); |
1da177e4 LT |
243 | |
244 | h = softirq_vec; | |
245 | ||
246 | do { | |
247 | if (pending & 1) { | |
f4bc6bb2 | 248 | unsigned int vec_nr = h - softirq_vec; |
8e85b4b5 TG |
249 | int prev_count = preempt_count(); |
250 | ||
f4bc6bb2 TG |
251 | kstat_incr_softirqs_this_cpu(vec_nr); |
252 | ||
253 | trace_softirq_entry(vec_nr); | |
6fa3eb70 S |
254 | mt_trace_SoftIRQ_start(vec_nr); |
255 | h->action(h); | |
256 | mt_trace_SoftIRQ_end(vec_nr); | |
f4bc6bb2 | 257 | trace_softirq_exit(vec_nr); |
8e85b4b5 | 258 | if (unlikely(prev_count != preempt_count())) { |
f4bc6bb2 | 259 | printk(KERN_ERR "huh, entered softirq %u %s %p" |
8e85b4b5 | 260 | " with preempt_count %08x," |
f4bc6bb2 TG |
261 | " exited with %08x?\n", vec_nr, |
262 | softirq_to_name[vec_nr], h->action, | |
263 | prev_count, preempt_count()); | |
8e85b4b5 TG |
264 | preempt_count() = prev_count; |
265 | } | |
266 | ||
d6714c22 | 267 | rcu_bh_qs(cpu); |
1da177e4 LT |
268 | } |
269 | h++; | |
270 | pending >>= 1; | |
271 | } while (pending); | |
272 | ||
c70f5d66 | 273 | local_irq_disable(); |
1da177e4 LT |
274 | |
275 | pending = local_softirq_pending(); | |
c10d7367 | 276 | if (pending) { |
34376a50 BG |
277 | if (time_before(jiffies, end) && !need_resched() && |
278 | --max_restart) | |
c10d7367 | 279 | goto restart; |
1da177e4 | 280 | |
1da177e4 | 281 | wakeup_softirqd(); |
c10d7367 | 282 | } |
1da177e4 | 283 | |
d820ac4c | 284 | lockdep_softirq_exit(); |
829035fd | 285 | |
6a61671b | 286 | account_irq_exit_time(current); |
75e1056f | 287 | __local_bh_enable(SOFTIRQ_OFFSET); |
907aed48 | 288 | tsk_restore_flags(current, old_flags, PF_MEMALLOC); |
1da177e4 LT |
289 | } |
290 | ||
291 | #ifndef __ARCH_HAS_DO_SOFTIRQ | |
292 | ||
293 | asmlinkage void do_softirq(void) | |
294 | { | |
295 | __u32 pending; | |
296 | unsigned long flags; | |
297 | ||
298 | if (in_interrupt()) | |
299 | return; | |
300 | ||
301 | local_irq_save(flags); | |
302 | ||
303 | pending = local_softirq_pending(); | |
304 | ||
305 | if (pending) | |
306 | __do_softirq(); | |
307 | ||
308 | local_irq_restore(flags); | |
309 | } | |
310 | ||
1da177e4 LT |
311 | #endif |
312 | ||
dde4b2b5 IM |
313 | /* |
314 | * Enter an interrupt context. | |
315 | */ | |
316 | void irq_enter(void) | |
317 | { | |
6378ddb5 | 318 | int cpu = smp_processor_id(); |
719254fa | 319 | |
64db4cff | 320 | rcu_irq_enter(); |
0a8a2e78 | 321 | if (is_idle_task(current) && !in_interrupt()) { |
d267f87f VP |
322 | /* |
323 | * Prevent raise_softirq from needlessly waking up ksoftirqd | |
324 | * here, as softirq will be serviced on return from interrupt. | |
325 | */ | |
326 | local_bh_disable(); | |
719254fa | 327 | tick_check_idle(cpu); |
d267f87f VP |
328 | _local_bh_enable(); |
329 | } | |
330 | ||
331 | __irq_enter(); | |
dde4b2b5 IM |
332 | } |
333 | ||
8d32a307 TG |
334 | static inline void invoke_softirq(void) |
335 | { | |
6dcdd575 FW |
336 | if (!force_irqthreads) { |
337 | /* | |
338 | * We can safely execute softirq on the current stack if | |
339 | * it is the irq stack, because it should be near empty | |
340 | * at this stage. But we have no way to know if the arch | |
341 | * calls irq_exit() on the irq stack. So call softirq | |
342 | in its own stack to prevent any overrun on top |
343 | * of a potentially deep task stack. | |
344 | */ | |
345 | do_softirq(); | |
346 | } else { | |
8d32a307 | 347 | wakeup_softirqd(); |
6dcdd575 | 348 | } |
8d32a307 | 349 | } |
1da177e4 | 350 | |
67826eae FW |
351 | static inline void tick_irq_exit(void) |
352 | { | |
353 | #ifdef CONFIG_NO_HZ_COMMON | |
354 | int cpu = smp_processor_id(); | |
355 | ||
356 | /* Make sure that timer wheel updates are propagated */ | |
357 | if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) { | |
358 | if (!in_interrupt()) | |
359 | tick_nohz_irq_exit(); | |
360 | } | |
361 | #endif | |
362 | } | |
363 | ||
1da177e4 LT |
364 | /* |
365 | * Exit an interrupt context. Process softirqs if needed and possible: | |
366 | */ | |
367 | void irq_exit(void) | |
368 | { | |
74eed016 | 369 | #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED |
4cd5d111 | 370 | local_irq_disable(); |
74eed016 TG |
371 | #else |
372 | WARN_ON_ONCE(!irqs_disabled()); | |
373 | #endif | |
374 | ||
6a61671b | 375 | account_irq_exit_time(current); |
de30a2b3 | 376 | trace_hardirq_exit(); |
4d4c4e24 | 377 | sub_preempt_count(HARDIRQ_OFFSET); |
1da177e4 LT |
378 | if (!in_interrupt() && local_softirq_pending()) |
379 | invoke_softirq(); | |
79bf2bb3 | 380 | |
67826eae | 381 | tick_irq_exit(); |
416eb33c | 382 | rcu_irq_exit(); |
1da177e4 LT |
383 | } |
384 | ||
385 | /* | |
386 | * This function must run with irqs disabled! | |
387 | */ | |
7ad5b3a5 | 388 | inline void raise_softirq_irqoff(unsigned int nr) |
1da177e4 LT |
389 | { |
390 | __raise_softirq_irqoff(nr); | |
391 | ||
392 | /* | |
393 | * If we're in an interrupt or softirq, we're done | |
394 | * (this also catches softirq-disabled code). We will | |
395 | * actually run the softirq once we return from | |
396 | * the irq or softirq. | |
397 | * | |
398 | * Otherwise we wake up ksoftirqd to make sure we | |
399 | * schedule the softirq soon. | |
400 | */ | |
401 | if (!in_interrupt()) | |
402 | wakeup_softirqd(); | |
403 | } | |
404 | ||
7ad5b3a5 | 405 | void raise_softirq(unsigned int nr) |
1da177e4 LT |
406 | { |
407 | unsigned long flags; | |
408 | ||
409 | local_irq_save(flags); | |
410 | raise_softirq_irqoff(nr); | |
411 | local_irq_restore(flags); | |
412 | } | |
413 | ||
f069686e SR |
414 | void __raise_softirq_irqoff(unsigned int nr) |
415 | { | |
416 | trace_softirq_raise(nr); | |
417 | or_softirq_pending(1UL << nr); | |
418 | } | |
419 | ||
962cf36c | 420 | void open_softirq(int nr, void (*action)(struct softirq_action *)) |
1da177e4 | 421 | { |
1da177e4 LT |
422 | softirq_vec[nr].action = action; |
423 | } | |
424 | ||
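For reference, a sketch of how a subsystem wires and raises a vector. EXAMPLE_SOFTIRQ is hypothetical: real vectors come from the fixed enum in <linux/interrupt.h> (NET_TX_SOFTIRQ, BLOCK_SOFTIRQ, ...), and handlers are registered once at boot, exactly as softirq_init() below does for the tasklet vectors.

```c
#include <linux/interrupt.h>

static void example_action(struct softirq_action *h);	/* as sketched earlier */

static int __init example_init(void)
{
	/* EXAMPLE_SOFTIRQ is hypothetical; vectors live in a fixed enum. */
	open_softirq(EXAMPLE_SOFTIRQ, example_action);
	return 0;
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* Mark the vector pending; it runs from irq_exit() or ksoftirqd. */
	raise_softirq(EXAMPLE_SOFTIRQ);
	return IRQ_HANDLED;
}
```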
9ba5f005 PZ |
425 | /* |
426 | * Tasklets | |
427 | */ | |
1da177e4 LT |
428 | struct tasklet_head |
429 | { | |
48f20a9a OJ |
430 | struct tasklet_struct *head; |
431 | struct tasklet_struct **tail; | |
1da177e4 LT |
432 | }; |
433 | ||
4620b49f VN |
434 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); |
435 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); | |
1da177e4 | 436 | |
7ad5b3a5 | 437 | void __tasklet_schedule(struct tasklet_struct *t) |
1da177e4 LT |
438 | { |
439 | unsigned long flags; | |
440 | ||
441 | local_irq_save(flags); | |
48f20a9a | 442 | t->next = NULL; |
909ea964 CL |
443 | *__this_cpu_read(tasklet_vec.tail) = t; |
444 | __this_cpu_write(tasklet_vec.tail, &(t->next)); | |
1da177e4 LT |
445 | raise_softirq_irqoff(TASKLET_SOFTIRQ); |
446 | local_irq_restore(flags); | |
447 | } | |
448 | ||
449 | EXPORT_SYMBOL(__tasklet_schedule); | |
450 | ||
7ad5b3a5 | 451 | void __tasklet_hi_schedule(struct tasklet_struct *t) |
1da177e4 LT |
452 | { |
453 | unsigned long flags; | |
454 | ||
455 | local_irq_save(flags); | |
48f20a9a | 456 | t->next = NULL; |
909ea964 CL |
457 | *__this_cpu_read(tasklet_hi_vec.tail) = t; |
458 | __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); | |
1da177e4 LT |
459 | raise_softirq_irqoff(HI_SOFTIRQ); |
460 | local_irq_restore(flags); | |
461 | } | |
462 | ||
463 | EXPORT_SYMBOL(__tasklet_hi_schedule); | |
464 | ||
7c692cba VN |
465 | void __tasklet_hi_schedule_first(struct tasklet_struct *t) |
466 | { | |
467 | BUG_ON(!irqs_disabled()); | |
468 | ||
909ea964 CL |
469 | t->next = __this_cpu_read(tasklet_hi_vec.head); |
470 | __this_cpu_write(tasklet_hi_vec.head, t); | |
7c692cba VN |
471 | __raise_softirq_irqoff(HI_SOFTIRQ); |
472 | } | |
473 | ||
474 | EXPORT_SYMBOL(__tasklet_hi_schedule_first); | |
475 | ||
1da177e4 LT |
476 | static void tasklet_action(struct softirq_action *a) |
477 | { | |
478 | struct tasklet_struct *list; | |
479 | ||
480 | local_irq_disable(); | |
909ea964 CL |
481 | list = __this_cpu_read(tasklet_vec.head); |
482 | __this_cpu_write(tasklet_vec.head, NULL); | |
483 | __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); | |
1da177e4 LT |
484 | local_irq_enable(); |
485 | ||
486 | while (list) { | |
487 | struct tasklet_struct *t = list; | |
488 | ||
489 | list = list->next; | |
490 | ||
491 | if (tasklet_trylock(t)) { | |
492 | if (!atomic_read(&t->count)) { | |
493 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) | |
494 | BUG(); | |
6fa3eb70 | 495 | mt_trace_tasklet_start(t->func); |
1da177e4 | 496 | t->func(t->data); |
6fa3eb70 | 497 | mt_trace_tasklet_end(t->func); |
1da177e4 LT |
498 | tasklet_unlock(t); |
499 | continue; | |
500 | } | |
501 | tasklet_unlock(t); | |
502 | } | |
503 | ||
504 | local_irq_disable(); | |
48f20a9a | 505 | t->next = NULL; |
909ea964 CL |
506 | *__this_cpu_read(tasklet_vec.tail) = t; |
507 | __this_cpu_write(tasklet_vec.tail, &(t->next)); | |
1da177e4 LT |
508 | __raise_softirq_irqoff(TASKLET_SOFTIRQ); |
509 | local_irq_enable(); | |
510 | } | |
511 | } | |
512 | ||
513 | static void tasklet_hi_action(struct softirq_action *a) | |
514 | { | |
515 | struct tasklet_struct *list; | |
516 | ||
517 | local_irq_disable(); | |
909ea964 CL |
518 | list = __this_cpu_read(tasklet_hi_vec.head); |
519 | __this_cpu_write(tasklet_hi_vec.head, NULL); | |
520 | __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); | |
1da177e4 LT |
521 | local_irq_enable(); |
522 | ||
523 | while (list) { | |
524 | struct tasklet_struct *t = list; | |
525 | ||
526 | list = list->next; | |
527 | ||
528 | if (tasklet_trylock(t)) { | |
529 | if (!atomic_read(&t->count)) { | |
530 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) | |
531 | BUG(); | |
532 | t->func(t->data); | |
533 | tasklet_unlock(t); | |
534 | continue; | |
535 | } | |
536 | tasklet_unlock(t); | |
537 | } | |
538 | ||
539 | local_irq_disable(); | |
48f20a9a | 540 | t->next = NULL; |
909ea964 CL |
541 | *__this_cpu_read(tasklet_hi_vec.tail) = t; |
542 | __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); | |
1da177e4 LT |
543 | __raise_softirq_irqoff(HI_SOFTIRQ); |
544 | local_irq_enable(); | |
545 | } | |
546 | } | |
547 | ||
548 | ||
549 | void tasklet_init(struct tasklet_struct *t, | |
550 | void (*func)(unsigned long), unsigned long data) | |
551 | { | |
552 | t->next = NULL; | |
553 | t->state = 0; | |
554 | atomic_set(&t->count, 0); | |
555 | t->func = func; | |
556 | t->data = data; | |
557 | } | |
558 | ||
559 | EXPORT_SYMBOL(tasklet_init); | |
560 | ||
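A typical driver-side sketch of the tasklet API implemented above (device names hypothetical): the handler is deferred out of the hardirq and, per the header comment, is serialized with respect to itself.

```c
#include <linux/interrupt.h>

static void example_tasklet_fn(unsigned long data)
{
	/* Softirq context: this tasklet never runs on two CPUs at once. */
}

static DECLARE_TASKLET(example_tasklet, example_tasklet_fn, 0);

static irqreturn_t example_dev_isr(int irq, void *dev_id)
{
	tasklet_schedule(&example_tasklet);	/* defer the heavy lifting */
	return IRQ_HANDLED;
}
```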
561 | void tasklet_kill(struct tasklet_struct *t) | |
562 | { | |
563 | if (in_interrupt()) | |
564 | printk("Attempt to kill tasklet from interrupt\n"); | |
565 | ||
566 | while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { | |
79d381c9 | 567 | do { |
1da177e4 | 568 | yield(); |
79d381c9 | 569 | } while (test_bit(TASKLET_STATE_SCHED, &t->state)); |
1da177e4 LT |
570 | } |
571 | tasklet_unlock_wait(t); | |
572 | clear_bit(TASKLET_STATE_SCHED, &t->state); | |
573 | } | |
574 | ||
575 | EXPORT_SYMBOL(tasklet_kill); | |
576 | ||
9ba5f005 PZ |
577 | /* |
578 | * tasklet_hrtimer | |
579 | */ | |
580 | ||
581 | /* | |
b9c30322 PZ |
582 | * The trampoline is called when the hrtimer expires. It schedules a tasklet |
583 | * to run __tasklet_hrtimer_trampoline() which in turn will call the intended | |
584 | * hrtimer callback, but from softirq context. | |
9ba5f005 PZ |
585 | */ |
586 | static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer) | |
587 | { | |
588 | struct tasklet_hrtimer *ttimer = | |
589 | container_of(timer, struct tasklet_hrtimer, timer); | |
590 | ||
b9c30322 PZ |
591 | tasklet_hi_schedule(&ttimer->tasklet); |
592 | return HRTIMER_NORESTART; | |
9ba5f005 PZ |
593 | } |
594 | ||
595 | /* | |
596 | * Helper function which calls the hrtimer callback from | |
597 | * tasklet/softirq context | |
598 | */ | |
599 | static void __tasklet_hrtimer_trampoline(unsigned long data) | |
600 | { | |
601 | struct tasklet_hrtimer *ttimer = (void *)data; | |
602 | enum hrtimer_restart restart; | |
603 | ||
604 | restart = ttimer->function(&ttimer->timer); | |
605 | if (restart != HRTIMER_NORESTART) | |
606 | hrtimer_restart(&ttimer->timer); | |
607 | } | |
608 | ||
609 | /** | |
610 | * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks | |
611 | * @ttimer: tasklet_hrtimer which is initialized | |
25985edc | 612 | * @function: hrtimer callback function which gets called from softirq context |
9ba5f005 PZ |
613 | * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME) |
614 | * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL) | |
615 | */ | |
616 | void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, | |
617 | enum hrtimer_restart (*function)(struct hrtimer *), | |
618 | clockid_t which_clock, enum hrtimer_mode mode) | |
619 | { | |
620 | hrtimer_init(&ttimer->timer, which_clock, mode); | |
621 | ttimer->timer.function = __hrtimer_tasklet_trampoline; | |
622 | tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline, | |
623 | (unsigned long)ttimer); | |
624 | ttimer->function = function; | |
625 | } | |
626 | EXPORT_SYMBOL_GPL(tasklet_hrtimer_init); | |
627 | ||
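A short usage sketch, assuming a caller that wants its timer callback in softirq rather than hardirq context (names hypothetical; tasklet_hrtimer_start() is the companion helper in <linux/interrupt.h>):

```c
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer example_thr;

static enum hrtimer_restart example_timer_cb(struct hrtimer *t)
{
	/* Runs from the HI_SOFTIRQ tasklet, not from hardirq context. */
	return HRTIMER_NORESTART;
}

static void example_arm_timer(void)
{
	tasklet_hrtimer_init(&example_thr, example_timer_cb,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&example_thr, ktime_set(0, 10 * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
}
```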
628 | /* | |
629 | * Remote softirq bits | |
630 | */ | |
631 | ||
54514a70 DM |
632 | DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); |
633 | EXPORT_PER_CPU_SYMBOL(softirq_work_list); | |
634 | ||
635 | static void __local_trigger(struct call_single_data *cp, int softirq) | |
636 | { | |
637 | struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]); | |
638 | ||
639 | list_add_tail(&cp->list, head); | |
640 | ||
641 | /* Trigger the softirq only if the list was previously empty. */ | |
642 | if (head->next == &cp->list) | |
643 | raise_softirq_irqoff(softirq); | |
644 | } | |
645 | ||
646 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS | |
647 | static void remote_softirq_receive(void *data) | |
648 | { | |
649 | struct call_single_data *cp = data; | |
650 | unsigned long flags; | |
651 | int softirq; | |
652 | ||
3440a1ca | 653 | softirq = *(int *)cp->info; |
54514a70 DM |
654 | local_irq_save(flags); |
655 | __local_trigger(cp, softirq); | |
656 | local_irq_restore(flags); | |
657 | } | |
658 | ||
659 | static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq) | |
660 | { | |
661 | if (cpu_online(cpu)) { | |
662 | cp->func = remote_softirq_receive; | |
3440a1ca | 663 | cp->info = &softirq; |
54514a70 | 664 | cp->flags = 0; |
54514a70 | 665 | |
6e275637 | 666 | __smp_call_function_single(cpu, cp, 0); |
54514a70 DM |
667 | return 0; |
668 | } | |
669 | return 1; | |
670 | } | |
671 | #else /* CONFIG_USE_GENERIC_SMP_HELPERS */ | |
672 | static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq) | |
673 | { | |
674 | return 1; | |
675 | } | |
676 | #endif | |
677 | ||
678 | /** | |
679 | * __send_remote_softirq - try to schedule softirq work on a remote cpu | |
680 | * @cp: private SMP call function data area | |
681 | * @cpu: the remote cpu | |
682 | * @this_cpu: the currently executing cpu | |
683 | * @softirq: the softirq for the work | |
684 | * | |
685 | * Attempt to schedule softirq work on a remote cpu. If this cannot be | |
686 | * done, the work is instead queued up on the local cpu. | |
687 | * | |
688 | * Interrupts must be disabled. | |
689 | */ | |
690 | void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq) | |
691 | { | |
692 | if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq)) | |
693 | __local_trigger(cp, softirq); | |
694 | } | |
695 | EXPORT_SYMBOL(__send_remote_softirq); | |
696 | ||
697 | /** | |
698 | * send_remote_softirq - try to schedule softirq work on a remote cpu | |
699 | * @cp: private SMP call function data area | |
700 | * @cpu: the remote cpu | |
701 | * @softirq: the softirq for the work | |
702 | * | |
703 | * Like __send_remote_softirq except that disabling interrupts and | |
704 | * computing the current cpu is done for the caller. | |
705 | */ | |
706 | void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq) | |
707 | { | |
708 | unsigned long flags; | |
709 | int this_cpu; | |
710 | ||
711 | local_irq_save(flags); | |
712 | this_cpu = smp_processor_id(); | |
713 | __send_remote_softirq(cp, cpu, this_cpu, softirq); | |
714 | local_irq_restore(flags); | |
715 | } | |
716 | EXPORT_SYMBOL(send_remote_softirq); | |
717 | ||
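A sketch of the remote API above, loosely modeled on how the block layer completes requests on the submitting CPU. The struct and fields are hypothetical; the call_single_data must stay live until the softirq has consumed it, and the target vector's handler is expected to drain its per-CPU softirq_work_list.

```c
#include <linux/interrupt.h>
#include <linux/smp.h>

struct example_req {
	struct call_single_data csd;	/* must outlive the remote trigger */
	int submit_cpu;			/* CPU that issued the request */
};

static void example_complete(struct example_req *req)
{
	/*
	 * Queues onto submit_cpu's softirq_work_list[BLOCK_SOFTIRQ] and
	 * raises the vector there; falls back to the local CPU if the
	 * target is offline or generic SMP helpers are unavailable.
	 */
	send_remote_softirq(&req->csd, req->submit_cpu, BLOCK_SOFTIRQ);
}
```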
718 | static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self, | |
719 | unsigned long action, void *hcpu) | |
720 | { | |
721 | /* | |
722 | * If a CPU goes away, splice its entries to the current CPU | |
723 | * and trigger a run of the softirq | |
724 | */ | |
725 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | |
726 | int cpu = (unsigned long) hcpu; | |
727 | int i; | |
728 | ||
729 | local_irq_disable(); | |
730 | for (i = 0; i < NR_SOFTIRQS; i++) { | |
731 | struct list_head *head = &per_cpu(softirq_work_list[i], cpu); | |
732 | struct list_head *local_head; | |
733 | ||
734 | if (list_empty(head)) | |
735 | continue; | |
736 | ||
737 | local_head = &__get_cpu_var(softirq_work_list[i]); | |
738 | list_splice_init(head, local_head); | |
739 | raise_softirq_irqoff(i); | |
740 | } | |
741 | local_irq_enable(); | |
742 | } | |
743 | ||
744 | return NOTIFY_OK; | |
745 | } | |
746 | ||
747 | static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = { | |
748 | .notifier_call = remote_softirq_cpu_notify, | |
749 | }; | |
750 | ||
1da177e4 LT |
751 | void __init softirq_init(void) |
752 | { | |
48f20a9a OJ |
753 | int cpu; |
754 | ||
755 | for_each_possible_cpu(cpu) { | |
54514a70 DM |
756 | int i; |
757 | ||
48f20a9a OJ |
758 | per_cpu(tasklet_vec, cpu).tail = |
759 | &per_cpu(tasklet_vec, cpu).head; | |
760 | per_cpu(tasklet_hi_vec, cpu).tail = | |
761 | &per_cpu(tasklet_hi_vec, cpu).head; | |
54514a70 DM |
762 | for (i = 0; i < NR_SOFTIRQS; i++) |
763 | INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu)); | |
48f20a9a OJ |
764 | } |
765 | ||
54514a70 DM |
766 | register_hotcpu_notifier(&remote_softirq_cpu_notifier); |
767 | ||
962cf36c CM |
768 | open_softirq(TASKLET_SOFTIRQ, tasklet_action); |
769 | open_softirq(HI_SOFTIRQ, tasklet_hi_action); | |
1da177e4 LT |
770 | } |
771 | ||
3e339b5d | 772 | static int ksoftirqd_should_run(unsigned int cpu) |
1da177e4 | 773 | { |
3e339b5d TG |
774 | return local_softirq_pending(); |
775 | } | |
1da177e4 | 776 | |
3e339b5d TG |
777 | static void run_ksoftirqd(unsigned int cpu) |
778 | { | |
779 | local_irq_disable(); | |
780 | if (local_softirq_pending()) { | |
781 | __do_softirq(); | |
782 | rcu_note_context_switch(cpu); | |
783 | local_irq_enable(); | |
784 | cond_resched(); | |
785 | return; | |
1da177e4 | 786 | } |
3e339b5d | 787 | local_irq_enable(); |
1da177e4 LT |
788 | } |
789 | ||
790 | #ifdef CONFIG_HOTPLUG_CPU | |
791 | /* | |
792 | * tasklet_kill_immediate is called to remove a tasklet which may already be |
793 | * scheduled for execution on @cpu. | |
794 | * | |
795 | * Unlike tasklet_kill, this function removes the tasklet | |
796 | * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state. | |
797 | * | |
798 | * When this function is called, @cpu must be in the CPU_DEAD state. | |
799 | */ | |
800 | void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu) | |
801 | { | |
802 | struct tasklet_struct **i; | |
803 | ||
804 | BUG_ON(cpu_online(cpu)); | |
805 | BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state)); | |
806 | ||
807 | if (!test_bit(TASKLET_STATE_SCHED, &t->state)) | |
808 | return; | |
809 | ||
810 | /* CPU is dead, so no lock needed. */ | |
48f20a9a | 811 | for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { |
1da177e4 LT |
812 | if (*i == t) { |
813 | *i = t->next; | |
48f20a9a OJ |
814 | /* If this was the tail element, move the tail ptr */ |
815 | if (*i == NULL) | |
816 | per_cpu(tasklet_vec, cpu).tail = i; | |
1da177e4 LT |
817 | return; |
818 | } | |
819 | } | |
820 | BUG(); | |
821 | } | |
822 | ||
823 | static void takeover_tasklets(unsigned int cpu) | |
824 | { | |
1da177e4 LT |
825 | /* CPU is dead, so no lock needed. */ |
826 | local_irq_disable(); | |
827 | ||
828 | /* Find end, append list for that CPU. */ | |
e5e41723 | 829 | if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { |
909ea964 CL |
830 | *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; |
831 | this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); | |
e5e41723 CB |
832 | per_cpu(tasklet_vec, cpu).head = NULL; |
833 | per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; | |
834 | } | |
1da177e4 LT |
835 | raise_softirq_irqoff(TASKLET_SOFTIRQ); |
836 | ||
e5e41723 | 837 | if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { |
909ea964 CL |
838 | *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; |
839 | __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail); | |
e5e41723 CB |
840 | per_cpu(tasklet_hi_vec, cpu).head = NULL; |
841 | per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; | |
842 | } | |
1da177e4 LT |
843 | raise_softirq_irqoff(HI_SOFTIRQ); |
844 | ||
845 | local_irq_enable(); | |
846 | } | |
847 | #endif /* CONFIG_HOTPLUG_CPU */ | |
848 | ||
8c78f307 | 849 | static int __cpuinit cpu_callback(struct notifier_block *nfb, |
1da177e4 LT |
850 | unsigned long action, |
851 | void *hcpu) | |
852 | { | |
1da177e4 | 853 | switch (action) { |
1da177e4 | 854 | #ifdef CONFIG_HOTPLUG_CPU |
1da177e4 | 855 | case CPU_DEAD: |
3e339b5d TG |
856 | case CPU_DEAD_FROZEN: |
857 | takeover_tasklets((unsigned long)hcpu); | |
1da177e4 LT |
858 | break; |
859 | #endif /* CONFIG_HOTPLUG_CPU */ | |
3e339b5d | 860 | } |
1da177e4 LT |
861 | return NOTIFY_OK; |
862 | } | |
863 | ||
8c78f307 | 864 | static struct notifier_block __cpuinitdata cpu_nfb = { |
1da177e4 LT |
865 | .notifier_call = cpu_callback |
866 | }; | |
867 | ||
3e339b5d TG |
868 | static struct smp_hotplug_thread softirq_threads = { |
869 | .store = &ksoftirqd, | |
870 | .thread_should_run = ksoftirqd_should_run, | |
871 | .thread_fn = run_ksoftirqd, | |
872 | .thread_comm = "ksoftirqd/%u", | |
873 | }; | |
874 | ||
7babe8db | 875 | static __init int spawn_ksoftirqd(void) |
1da177e4 | 876 | { |
1da177e4 | 877 | register_cpu_notifier(&cpu_nfb); |
3e339b5d TG |
878 | |
879 | BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); | |
880 | ||
1da177e4 LT |
881 | return 0; |
882 | } | |
7babe8db | 883 | early_initcall(spawn_ksoftirqd); |
78eef01b | 884 | |
43a25632 YL |
885 | /* |
886 | * [ These __weak aliases are kept in a separate compilation unit, so that | |
887 | * GCC does not inline them incorrectly. ] | |
888 | */ | |
889 | ||
890 | int __init __weak early_irq_init(void) | |
891 | { | |
892 | return 0; | |
893 | } | |
894 | ||
b683de2b | 895 | #ifdef CONFIG_GENERIC_HARDIRQS |
4a046d17 YL |
896 | int __init __weak arch_probe_nr_irqs(void) |
897 | { | |
b683de2b | 898 | return NR_IRQS_LEGACY; |
4a046d17 YL |
899 | } |
900 | ||
43a25632 YL |
901 | int __init __weak arch_early_irq_init(void) |
902 | { | |
903 | return 0; | |
904 | } | |
b683de2b | 905 | #endif |