/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
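
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * driver queueing a work item onto its own workqueue through queue_work()
 * above. The names "foo_wq", "foo_work" and foo_work_handler() are
 * hypothetical; the block is guarded out so it is never compiled.
 */
#if 0	/* illustration only */
static struct workqueue_struct *foo_wq;	/* created with create_workqueue("foo") */

static void foo_work_handler(struct work_struct *work)
{
	/* Runs in process context on one of foo_wq's per-CPU worker threads. */
	printk(KERN_INFO "foo: deferred work running\n");
}

static DECLARE_WORK(foo_work, foo_work_handler);

static void foo_kick(void)
{
	/* queue_work() returns 1 if it queued the work now, 0 if already pending */
	if (!queue_work(foo_wq, &foo_work))
		printk(KERN_DEBUG "foo: work already pending\n");
}
#endif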

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
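
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * queueing a delayed work item so its handler runs roughly two seconds
 * later, and re-arming it from the handler. "foo_wq" is the hypothetical
 * workqueue from the previous sketch; foo_poll() is also hypothetical.
 */
#if 0	/* illustration only */
static void foo_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(foo_poll_work, foo_poll);

static void foo_poll(struct work_struct *work)
{
	/* ... periodic housekeeping ..., then re-arm ourselves */
	queue_delayed_work(foo_wq, &foo_poll_work, msecs_to_jiffies(2000));
}

static void foo_start_polling(void)
{
	queue_delayed_work(foo_wq, &foo_poll_work, msecs_to_jiffies(2000));
}
#endif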

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		f(work);
		lock_release(&lockdep_map, 1, _THIS_IP_);
		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
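
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * typical teardown path that stops new submissions, then flushes the queue
 * so every already-queued handler has finished before resources go away.
 * foo_stop(), foo_accepting_work and foo_wq are hypothetical names.
 */
#if 0	/* illustration only */
static int foo_accepting_work = 1;

static void foo_stop(void)
{
	/* driver-specific step: make sure no new queue_work() calls happen */
	foo_accepting_work = 0;

	/* wait until every work item queued so far has completed */
	flush_workqueue(foo_wq);

	/* now it is safe to free the data the handlers were using */
}
#endif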

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&work->lockdep_map, 1, _THIS_IP_);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
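
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a typical unload path that synchronously cancels outstanding work,
 * including a self-rearming delayed work item, before freeing the data the
 * handlers touch. Names reuse the hypothetical "foo" sketches above.
 */
#if 0	/* illustration only */
static void foo_teardown(void)
{
	/* stops the timer and/or waits for the running handler to finish */
	cancel_delayed_work_sync(&foo_poll_work);
	cancel_work_sync(&foo_work);

	/* neither handler can be running past this point */
}
#endif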

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
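
/*
 * Usage sketch (editor's illustration, not part of the original file): code
 * with no workqueue of its own can defer work to the shared keventd
 * ("events") queue via schedule_work(). Handlers share the per-CPU keventd
 * thread with everyone else, so they should not block for long.
 * bar_event_handler() and bar_irq_seen() are hypothetical names.
 */
#if 0	/* illustration only */
static void bar_event_handler(struct work_struct *work)
{
	printk(KERN_INFO "bar: handling event in process context\n");
}

static DECLARE_WORK(bar_event_work, bar_event_handler);

static void bar_irq_seen(void)
{
	/* safe from interrupt context; the handler runs later in keventd */
	schedule_work(&bar_event_work);
}
#endif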

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	flush_workqueue(keventd_wq);
	put_online_cpus();
	free_percpu(works);
	return 0;
}
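
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * running a function once on every online CPU and waiting for all of them
 * to finish, e.g. to drain per-CPU caches. drain_local_cache() and
 * drain_all_caches() are hypothetical names.
 */
#if 0	/* illustration only */
static void drain_local_cache(struct work_struct *unused)
{
	/* runs on the keventd thread of each online CPU; that thread is CPU-bound */
	printk(KERN_DEBUG "draining cache on cpu %d\n", smp_processor_id());
}

static int drain_all_caches(void)
{
	/* 0 on success, -ENOMEM if the per-cpu work array could not be allocated */
	return schedule_on_each_cpu(drain_local_cache);
}
#endif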

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
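
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * release path that may be entered from either process or interrupt context.
 * The execute_work storage is embedded in the object being released, so it
 * stays valid until the work executes, as the kernel-doc above requires.
 * "struct baz" and its functions are hypothetical.
 */
#if 0	/* illustration only */
struct baz {
	struct execute_work free_ew;
	/* ... */
};

static void baz_free_fn(struct work_struct *work)
{
	struct baz *b = container_of(work, struct baz, free_ew.work);

	kfree(b);
}

static void baz_release(struct baz *b)
{
	/* runs baz_free_fn() immediately if possible, otherwise via keventd */
	execute_in_process_context(baz_free_fn, &b->free_ew);
}
#endif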

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		get_online_cpus();
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		put_online_cpus();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * get_online_cpus() protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	get_online_cpus();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	put_online_cpus();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
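
/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * full lifecycle of a private workqueue from a module's init/exit hooks.
 * create_workqueue() is the <linux/workqueue.h> wrapper that ends up in
 * __create_workqueue_key() above. Names are hypothetical.
 */
#if 0	/* illustration only */
static struct workqueue_struct *foo_wq;

static int __init foo_init(void)
{
	foo_wq = create_workqueue("foo");	/* one worker thread per online CPU */
	if (!foo_wq)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* flushes pending work, stops the worker threads and frees the queue */
	destroy_workqueue(foo_wq);
}
#endif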

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}