/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
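/*
 * Illustrative sketch (added for clarity, not from the original source):
 * work->data packs the cpu_workqueue_struct pointer and the WORK_STRUCT_*
 * flag bits into a single word.  The cwq is cacheline-aligned, so its low
 * bits are zero and are free to carry the flags:
 *
 *	data  = (unsigned long)cwq | flags;		(set_wq_data)
 *	cwq   = data & WORK_STRUCT_WQ_DATA_MASK;	(get_wq_data)
 *	flags = data & WORK_STRUCT_FLAG_MASK;
 */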

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
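/*
 * Usage sketch (added for clarity; "example_wq", "example_work" and
 * example_func() are made-up names, not part of this file):
 *
 *	static void example_func(struct work_struct *work);
 *	static DECLARE_WORK(example_work, example_func);
 *
 *	struct workqueue_struct *example_wq = create_workqueue("example");
 *	if (example_wq)
 *		queue_work(example_wq, &example_work);
 */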

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
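/*
 * Usage sketch (added for clarity; "example_dwork" and example_func() are
 * made-up names, not part of this file):
 *
 *	static DECLARE_DELAYED_WORK(example_dwork, example_func);
 *
 *	queue_delayed_work(wq, &example_dwork, HZ);		(in ~1 second)
 *	queue_delayed_work_on(1, wq, &example_dwork, 2 * HZ);	(on CPU 1)
 */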

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}
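/*
 * Note (added for clarity): a wq_barrier is a dummy work item whose
 * callback simply completes ->done.  flush_cpu_workqueue() and
 * wait_on_cpu_work() below insert one behind the work they care about and
 * then sleep in wait_for_completion() until the worker thread reaches it.
 */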

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
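/*
 * Usage sketch (added for clarity; "dev" and its members are made-up): a
 * typical driver shutdown path stops re-arming work, then flushes whatever
 * is already queued before freeing the underlying data:
 *
 *	cancel_delayed_work(&dev->poll_work);
 *	flush_workqueue(dev->wq);
 *	destroy_workqueue(dev->wq);
 */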

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
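/*
 * Usage sketch (added for clarity; "example_dwork" is a made-up name):
 * tearing down a self-rearming delayed work at module unload:
 *
 *	cancel_delayed_work_sync(&example_dwork);
 *
 * Once this returns, the timer is gone and the callback is guaranteed not
 * to be running or re-arming itself.
 */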

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
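/*
 * Usage sketch (added for clarity; example_irq() and "example_work" are
 * made-up names): deferring work from an interrupt handler to process
 * context via the shared keventd workqueue:
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		...
 *		schedule_work(&example_work);	(runs later in keventd)
 *		return IRQ_HANDLED;
 *	}
 */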

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
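/*
 * Usage sketch (added for clarity; example_percpu_drain() is a made-up
 * function): run a callback once on every online CPU and wait for all of
 * them to finish:
 *
 *	err = schedule_on_each_cpu(example_percpu_drain);
 */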

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
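/*
 * Usage sketch (added for clarity): callers normally reach this through
 * the create_workqueue()/create_singlethread_workqueue() wrappers in
 * <linux/workqueue.h>, which supply the singlethread/freezeable flags:
 *
 *	struct workqueue_struct *wq = create_singlethread_workqueue("example");
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */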

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * workqueue_mutex protects cwq->thread
	 */
	if (cwq->thread == NULL)
		return;

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}