/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
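
/*
 * A minimal illustration (not part of the original file, compiled out) of
 * how these two offsets are consumed, using the in_softirq() and
 * in_serving_softirq() helpers from <linux/hardirq.h>: in_softirq() tests
 * the whole softirq part of preempt_count, while in_serving_softirq()
 * tests only the SOFTIRQ_OFFSET bit.
 */
#if 0   /* example only */
#include <linux/hardirq.h>
#include <linux/types.h>

static bool bh_disabled_but_not_serving(void)
{
        /*
         * True between local_bh_disable()/local_bh_enable(),
         * false while __do_softirq() is actually running handlers.
         */
        return in_softirq() && !in_serving_softirq();
}
#endif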

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += cnt;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        add_preempt_count(cnt);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0),
                           SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == cnt)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
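
/*
 * A minimal usage sketch (not part of the original file, compiled out).
 * local_bh_disable()/local_bh_enable() is the standard way for process
 * context to protect data it shares with softirq handlers on this CPU;
 * my_counter below is hypothetical:
 */
#if 0   /* example only */
#include <linux/interrupt.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void bump_my_counter(void)
{
        local_bh_disable();             /* keep softirqs off this CPU */
        __get_cpu_var(my_counter)++;    /* safe against softirq handlers */
        local_bh_enable();              /* may run pending softirqs now */
}
#endif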

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness:
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0),
                           SOFTIRQ_OFFSET);
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();

                        kstat_incr_softirqs_this_cpu(vec_nr);

                        trace_softirq_entry(vec_nr);
                        h->action(h);
                        trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %u %s %p "
                                       "with preempt_count %08x, "
                                       "exited with %08x?\n", vec_nr,
                                       softirq_to_name[vec_nr], h->action,
                                       prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        lockdep_softirq_exit();

        account_system_vtime(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_check_idle(cpu);
                _local_bh_enable();
        }

        __irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()       __do_softirq()
#else
# define invoke_softirq()       do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        rcu_irq_exit();
#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
#endif
        preempt_enable_no_resched();
}
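
/*
 * A rough sketch (not part of the original file, compiled out) of how an
 * architecture's interrupt entry path is expected to bracket hardirq
 * handling with the two helpers above. arch_do_IRQ() is a hypothetical
 * dispatcher name; generic_handle_irq() is the generic IRQ-layer entry:
 */
#if 0   /* example only */
#include <linux/irq.h>

void arch_do_IRQ(unsigned int irq)
{
        irq_enter();                    /* enter hardirq context, fix up NO_HZ */
        generic_handle_irq(irq);        /* run the handlers for this irq */
        irq_exit();                     /* may run pending softirqs on the way out */
}
#endif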

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
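
/*
 * A minimal sketch (not part of the original file, compiled out) of the
 * softirq API from a user's point of view: register a handler for one of
 * the fixed vectors at boot with open_softirq(), then raise it whenever
 * there is work. TASKLET_SOFTIRQ is reused here purely for illustration;
 * a real user owns a dedicated entry in the NR_SOFTIRQS enum:
 */
#if 0   /* example only */
#include <linux/interrupt.h>

static void my_softirq_action(struct softirq_action *h)
{
        /* runs with irqs enabled and bottom halves disabled, on the raising CPU */
}

static int __init my_softirq_setup(void)
{
        open_softirq(TASKLET_SOFTIRQ, my_softirq_action);       /* illustration only */
        raise_softirq(TASKLET_SOFTIRQ);                         /* schedule a run */
        return 0;
}
#endif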

/*
 * Tasklets
 */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_vec).tail = t;
        __get_cpu_var(tasklet_vec).tail = &(t->next);
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_hi_vec).tail = t;
        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = t;
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).head;
        __get_cpu_var(tasklet_vec).head = NULL;
        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_vec).tail = t;
                __get_cpu_var(tasklet_vec).tail = &(t->next);
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = NULL;
        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_hi_vec).tail = t;
                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
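
/*
 * A minimal tasklet lifecycle sketch (not part of the original file,
 * compiled out); my_tasklet, my_tasklet_fn and the my_dev_* helpers are
 * hypothetical. Initialize once, schedule from the hardirq handler, and
 * kill on teardown before the containing object goes away:
 */
#if 0   /* example only */
#include <linux/interrupt.h>

static void my_tasklet_fn(unsigned long data)
{
        /* runs in softirq context; 'data' carries the driver cookie */
}

static struct tasklet_struct my_tasklet;

static void my_dev_setup(unsigned long cookie)
{
        tasklet_init(&my_tasklet, my_tasklet_fn, cookie);
}

static irqreturn_t my_irq_handler(int irq, void *dev)
{
        tasklet_schedule(&my_tasklet);  /* defer the heavy work */
        return IRQ_HANDLED;
}

static void my_dev_teardown(void)
{
        tasklet_kill(&my_tasklet);      /* wait out any queued/running instance */
}
#endif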

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer: tasklet_hrtimer which is initialized
 * @function: hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
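
/*
 * A minimal usage sketch (not part of the original file, compiled out);
 * my_timer, my_cb and my_start are hypothetical. The callback fires in
 * softirq context rather than hardirq context, which is the point of the
 * combo. tasklet_hrtimer_start() is the starter from <linux/interrupt.h>:
 */
#if 0   /* example only */
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer my_timer;

static enum hrtimer_restart my_cb(struct hrtimer *timer)
{
        /* softirq context; return HRTIMER_RESTART to rearm */
        return HRTIMER_NORESTART;
}

static void my_start(void)
{
        tasklet_hrtimer_init(&my_timer, my_cb,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tasklet_hrtimer_start(&my_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
                              HRTIMER_MODE_REL);
}
#endif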

/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty. */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = cp->priv;

        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = cp;
                cp->flags = 0;
                cp->priv = softirq;

                __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
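
/*
 * A rough usage sketch (not part of the original file, compiled out);
 * MY_SOFTIRQ, struct my_work and the helpers are hypothetical. A caller
 * embeds a call_single_data in its work item and passes it to
 * send_remote_softirq(); the softirq handler on the target CPU drains its
 * softirq_work_list entry, which is where __local_trigger() queued it:
 */
#if 0   /* example only */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/smp.h>

struct my_work {
        struct call_single_data csd;    /* must stay live until handled */
        int payload;
};

static void my_softirq_action(struct softirq_action *h)
{
        LIST_HEAD(local);

        local_irq_disable();
        list_splice_init(&__get_cpu_var(softirq_work_list[MY_SOFTIRQ]), &local);
        local_irq_enable();

        while (!list_empty(&local)) {
                struct my_work *w = list_first_entry(&local, struct my_work,
                                                     csd.list);

                list_del(&w->csd.list);
                /* process w->payload here */
        }
}

static void kick_cpu(struct my_work *w, int cpu)
{
        send_remote_softirq(&w->csd, cpu, MY_SOFTIRQ);
}
#endif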

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int run_ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        current->flags |= PF_KSOFTIRQD;
        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on wrong CPU:
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                        rcu_note_context_switch((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return notifier_from_errno(PTR_ERR(p));
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run. Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                static struct sched_param param = {
                        .sched_priority = MAX_RT_PRIO-1
                };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err != NOTIFY_OK);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
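
/*
 * A minimal usage sketch (not part of the original file, compiled out),
 * with a hypothetical flush_my_cache(). With wait = 1 the call returns
 * only after every CPU, the caller included, has run the function:
 */
#if 0   /* example only */
#include <linux/smp.h>

static void flush_my_cache(void *info)
{
        /* runs with irqs disabled, on each CPU in turn */
}

static void flush_everywhere(void)
{
        on_each_cpu(flush_my_cache, NULL, 1);
}
#endif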

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}
#endif