/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 * Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

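/*
 * Illustrative sketch, not part of the original file: process-context code
 * that shares data with a softirq or tasklet handler typically brackets the
 * access with local_bh_disable()/local_bh_enable().  The my_stats structure
 * and my_update_stats() are hypothetical, and the block is guarded out so it
 * is never built.
 */
#if 0
struct my_stats {
	u64 bytes;
};

static void my_update_stats(struct my_stats *s, u64 delta)
{
	local_bh_disable();	/* keep softirq handlers off this CPU */
	s->bytes += delta;	/* safe against the bottom half now */
	local_bh_enable();	/* re-enable; runs any softirqs raised meanwhile */
}
#endif
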
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();
			kstat_incr_softirqs_this_cpu(h - softirq_vec);

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

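/*
 * Illustrative sketch, not part of the original file: architecture interrupt
 * entry code is expected to bracket handler dispatch with irq_enter() and
 * irq_exit(); the irq_exit() side is what lets pending softirqs run on the
 * way out of the hard interrupt.  my_arch_do_IRQ() and my_arch_ack() are
 * hypothetical, and the block is guarded out so it is never built.
 */
#if 0
void my_arch_do_IRQ(unsigned int irq)
{
	irq_enter();			/* enter hardirq context */
	generic_handle_irq(irq);	/* run the registered handler(s) */
	my_arch_ack(irq);		/* hypothetical controller EOI */
	irq_exit();			/* may invoke_softirq() before returning */
}
#endif
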
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

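/*
 * Illustrative sketch, not part of the original file: softirq numbers come
 * from the fixed enum in <linux/interrupt.h> (HI_SOFTIRQ, TIMER_SOFTIRQ,
 * NET_TX_SOFTIRQ, ...), so a subsystem registers its action once at init
 * time and then raises the softirq whenever there is work.  MY_SOFTIRQ and
 * the my_* names are hypothetical, and the block is guarded out so it is
 * never built.
 */
#if 0
static void my_softirq_action(struct softirq_action *h)
{
	/* runs in softirq context on this CPU, with interrupts enabled */
	my_process_pending_work();
}

static int __init my_subsys_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);	/* register the handler */
	return 0;
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	my_queue_work(dev_id);
	raise_softirq(MY_SOFTIRQ);	/* irq-safe variant; saves/restores flags */
	return IRQ_HANDLED;
}
#endif
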
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = t;
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

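/*
 * Illustrative sketch, not part of the original file: the usual driver
 * pattern is to set up a tasklet, schedule it from the hard interrupt
 * handler, and kill it on teardown.  All my_* names are hypothetical, and
 * the block is guarded out so it is never built.
 */
#if 0
static struct my_dev my_dev;
static struct tasklet_struct my_tasklet;

static void my_tasklet_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* bottom-half work: runs later in softirq context, irqs enabled */
	my_process_completions(dev);
}

static int __init my_init(void)
{
	tasklet_init(&my_tasklet, my_tasklet_fn, (unsigned long)&my_dev);
	return 0;
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	tasklet_schedule(&my_tasklet);	/* sets SCHED bit, raises TASKLET_SOFTIRQ */
	return IRQ_HANDLED;
}

static void my_teardown(void)
{
	tasklet_kill(&my_tasklet);	/* wait for a scheduled/running instance */
}
#endif
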
DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

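/*
 * Illustrative sketch, not part of the original file: a consumer of the
 * remote softirq infrastructure would typically embed a struct
 * call_single_data in its per-request object, queue the request toward a
 * chosen CPU with send_remote_softirq(), and drain its per-cpu
 * softirq_work_list entry from its softirq action.  MY_SOFTIRQ and the my_*
 * names are hypothetical, and the block is guarded out so it is never built.
 */
#if 0
struct my_request {
	struct call_single_data csd;
	/* ... request state ... */
};

static void my_complete_on(struct my_request *rq, int cpu)
{
	/* queues rq->csd on @cpu (or locally if @cpu is this cpu or offline) */
	send_remote_softirq(&rq->csd, cpu, MY_SOFTIRQ);
}

static void my_softirq_action(struct softirq_action *h)
{
	struct list_head local, *head;

	local_irq_disable();
	head = &__get_cpu_var(softirq_work_list[MY_SOFTIRQ]);
	list_replace_init(head, &local);	/* detach the queued entries */
	local_irq_enable();

	while (!list_empty(&local)) {
		struct my_request *rq =
			list_entry(local.next, struct my_request, csd.list);

		list_del(&rq->csd.list);
		my_finish_request(rq);
	}
}
#endif
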
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
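
/*
 * Illustrative sketch, not part of the original file: on_each_cpu() runs the
 * callback on every online CPU (including the caller) and, with wait == 1,
 * returns only after all of them have finished.  The callback runs with
 * interrupts disabled, so it must not sleep.  The my_cache names below are
 * hypothetical, and the block is guarded out so it is never built.
 */
#if 0
struct my_cache {
	unsigned long entries;
};
static DEFINE_PER_CPU(struct my_cache, my_cache);

static void my_flush_percpu_cache(void *info)
{
	/* runs on each CPU in turn; interrupts are disabled here */
	__get_cpu_var(my_cache).entries = 0;
}

static void my_flush_all_cpus(void)
{
	on_each_cpu(my_flush_percpu_cache, NULL, 1);	/* wait for completion */
}
#endif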

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
	return 0;
}