workqueue: fix build problem on !CONFIG_SMP
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / workqueue.c
CommitLineData
1da177e4
LT
1/*
2 * linux/kernel/workqueue.c
3 *
4 * Generic mechanism for defining kernel helper threads for running
5 * arbitrary tasks in process context.
6 *
7 * Started by Ingo Molnar, Copyright (C) 2002
8 *
9 * Derived from the taskqueue/keventd code by:
10 *
11 * David Woodhouse <dwmw2@infradead.org>
e1f8e874 12 * Andrew Morton
1da177e4
LT
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
89ada679 15 *
cde53535 16 * Made to use alloc_percpu by Christoph Lameter.
1da177e4
LT
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/sched.h>
22#include <linux/init.h>
23#include <linux/signal.h>
24#include <linux/completion.h>
25#include <linux/workqueue.h>
26#include <linux/slab.h>
27#include <linux/cpu.h>
28#include <linux/notifier.h>
29#include <linux/kthread.h>
1fa44eca 30#include <linux/hardirq.h>
46934023 31#include <linux/mempolicy.h>
341a5958 32#include <linux/freezer.h>
d5abe669
PZ
33#include <linux/kallsyms.h>
34#include <linux/debug_locks.h>
4e6045f1 35#include <linux/lockdep.h>
c34056a3 36#include <linux/idr.h>
e22bee78
TH
37
38#include "workqueue_sched.h"
1da177e4 39
c8e55f36 40enum {
db7bccf4 41 /* global_cwq flags */
e22bee78
TH
42 GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
43 GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */
44 GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
db7bccf4 45 GCWQ_FREEZING = 1 << 3, /* freeze in progress */
649027d7 46 GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */
db7bccf4 47
c8e55f36
TH
48 /* worker flags */
49 WORKER_STARTED = 1 << 0, /* started */
50 WORKER_DIE = 1 << 1, /* die die die */
51 WORKER_IDLE = 1 << 2, /* is idle */
e22bee78 52 WORKER_PREP = 1 << 3, /* preparing to run works */
db7bccf4 53 WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
e22bee78 54 WORKER_REBIND = 1 << 5, /* mom is home, come back */
fb0e7beb 55 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
f3421797 56 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
e22bee78 57
fb0e7beb 58 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
f3421797 59 WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
db7bccf4
TH
60
61 /* gcwq->trustee_state */
62 TRUSTEE_START = 0, /* start */
63 TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
64 TRUSTEE_BUTCHER = 2, /* butcher workers */
65 TRUSTEE_RELEASE = 3, /* release workers */
66 TRUSTEE_DONE = 4, /* trustee is done */
c8e55f36
TH
67
68 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
69 BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
70 BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
db7bccf4 71
e22bee78
TH
72 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
73 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
74
75 MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */
76 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
77 CREATE_COOLDOWN = HZ, /* time to breath after fail */
db7bccf4 78 TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
e22bee78
TH
79
80 /*
81 * Rescue workers are used only on emergencies and shared by
82 * all cpus. Give -20.
83 */
84 RESCUER_NICE_LEVEL = -20,
c8e55f36
TH
85};
86
4690c4ab
TH
87/*
88 * Structure fields follow one of the following exclusion rules.
89 *
90 * I: Set during initialization and read-only afterwards.
91 *
e22bee78
TH
92 * P: Preemption protected. Disabling preemption is enough and should
93 * only be modified and accessed from the local cpu.
94 *
8b03ae3c 95 * L: gcwq->lock protected. Access with gcwq->lock held.
4690c4ab 96 *
e22bee78
TH
97 * X: During normal operation, modification requires gcwq->lock and
98 * should be done only from local cpu. Either disabling preemption
99 * on local cpu or grabbing gcwq->lock is enough for read access.
f3421797 100 * If GCWQ_DISASSOCIATED is set, it's identical to L.
e22bee78 101 *
73f53c4a
TH
102 * F: wq->flush_mutex protected.
103 *
4690c4ab
TH
104 * W: workqueue_lock protected.
105 */
106
8b03ae3c 107struct global_cwq;
c34056a3 108
e22bee78
TH
109/*
110 * The poor guys doing the actual heavy lifting. All on-duty workers
111 * are either serving the manager role, on idle list or on busy hash.
112 */
c34056a3 113struct worker {
c8e55f36
TH
114 /* on idle list while idle, on busy hash table while busy */
115 union {
116 struct list_head entry; /* L: while idle */
117 struct hlist_node hentry; /* L: while busy */
118 };
119
c34056a3 120 struct work_struct *current_work; /* L: work being processed */
8cca0eea 121 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
affee4b2 122 struct list_head scheduled; /* L: scheduled works */
c34056a3 123 struct task_struct *task; /* I: worker task */
8b03ae3c 124 struct global_cwq *gcwq; /* I: the associated gcwq */
e22bee78
TH
125 /* 64 bytes boundary on 64bit, 32 on 32bit */
126 unsigned long last_active; /* L: last active timestamp */
127 unsigned int flags; /* X: flags */
c34056a3 128 int id; /* I: worker id */
e22bee78 129 struct work_struct rebind_work; /* L: rebind worker to cpu */
c34056a3
TH
130};
131
8b03ae3c 132/*
e22bee78
TH
133 * Global per-cpu workqueue. There's one and only one for each cpu
134 * and all works are queued and processed here regardless of their
135 * target workqueues.
8b03ae3c
TH
136 */
137struct global_cwq {
138 spinlock_t lock; /* the gcwq lock */
7e11629d 139 struct list_head worklist; /* L: list of pending works */
8b03ae3c 140 unsigned int cpu; /* I: the associated cpu */
db7bccf4 141 unsigned int flags; /* L: GCWQ_* flags */
c8e55f36
TH
142
143 int nr_workers; /* L: total number of workers */
144 int nr_idle; /* L: currently idle ones */
145
146 /* workers are chained either in the idle_list or busy_hash */
e22bee78 147 struct list_head idle_list; /* X: list of idle workers */
c8e55f36
TH
148 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
149 /* L: hash of busy workers */
150
e22bee78
TH
151 struct timer_list idle_timer; /* L: worker idle timeout */
152 struct timer_list mayday_timer; /* L: SOS timer for dworkers */
153
8b03ae3c 154 struct ida worker_ida; /* L: for worker IDs */
db7bccf4
TH
155
156 struct task_struct *trustee; /* L: for gcwq shutdown */
157 unsigned int trustee_state; /* L: trustee state */
158 wait_queue_head_t trustee_wait; /* trustee wait */
e22bee78 159 struct worker *first_idle; /* L: first idle worker */
8b03ae3c
TH
160} ____cacheline_aligned_in_smp;
161
1da177e4 162/*
502ca9d8 163 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
0f900049
TH
164 * work_struct->data are used for flags and thus cwqs need to be
165 * aligned at two's power of the number of flag bits.
1da177e4
LT
166 */
167struct cpu_workqueue_struct {
8b03ae3c 168 struct global_cwq *gcwq; /* I: the associated gcwq */
4690c4ab 169 struct workqueue_struct *wq; /* I: the owning workqueue */
73f53c4a
TH
170 int work_color; /* L: current color */
171 int flush_color; /* L: flushing color */
172 int nr_in_flight[WORK_NR_COLORS];
173 /* L: nr of in_flight works */
1e19ffc6 174 int nr_active; /* L: nr of active works */
a0a1a5fd 175 int max_active; /* L: max active works */
1e19ffc6 176 struct list_head delayed_works; /* L: delayed works */
0f900049 177};
1da177e4 178
73f53c4a
TH
179/*
180 * Structure used to wait for workqueue flush.
181 */
182struct wq_flusher {
183 struct list_head list; /* F: list of flushers */
184 int flush_color; /* F: flush color waiting for */
185 struct completion done; /* flush completion */
186};
187
1da177e4
LT
188/*
189 * The externally visible workqueue abstraction is an array of
190 * per-CPU workqueues:
191 */
192struct workqueue_struct {
97e37d7b 193 unsigned int flags; /* I: WQ_* flags */
bdbc5dd7
TH
194 union {
195 struct cpu_workqueue_struct __percpu *pcpu;
196 struct cpu_workqueue_struct *single;
197 unsigned long v;
198 } cpu_wq; /* I: cwq's */
4690c4ab 199 struct list_head list; /* W: list of all workqueues */
73f53c4a
TH
200
201 struct mutex flush_mutex; /* protects wq flushing */
202 int work_color; /* F: current work color */
203 int flush_color; /* F: current flush color */
204 atomic_t nr_cwqs_to_flush; /* flush in progress */
205 struct wq_flusher *first_flusher; /* F: first flusher */
206 struct list_head flusher_queue; /* F: flush waiters */
207 struct list_head flusher_overflow; /* F: flush overflow list */
208
e22bee78
TH
209 cpumask_var_t mayday_mask; /* cpus requesting rescue */
210 struct worker *rescuer; /* I: rescue worker */
211
dcd989cb 212 int saved_max_active; /* W: saved cwq max_active */
4690c4ab 213 const char *name; /* I: workqueue name */
4e6045f1 214#ifdef CONFIG_LOCKDEP
4690c4ab 215 struct lockdep_map lockdep_map;
4e6045f1 216#endif
1da177e4
LT
217};
218
d320c038
TH
219struct workqueue_struct *system_wq __read_mostly;
220struct workqueue_struct *system_long_wq __read_mostly;
221struct workqueue_struct *system_nrt_wq __read_mostly;
f3421797 222struct workqueue_struct *system_unbound_wq __read_mostly;
d320c038
TH
223EXPORT_SYMBOL_GPL(system_wq);
224EXPORT_SYMBOL_GPL(system_long_wq);
225EXPORT_SYMBOL_GPL(system_nrt_wq);
f3421797 226EXPORT_SYMBOL_GPL(system_unbound_wq);
d320c038 227
db7bccf4
TH
228#define for_each_busy_worker(worker, i, pos, gcwq) \
229 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
230 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
231
f3421797
TH
232static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
233 unsigned int sw)
234{
235 if (cpu < nr_cpu_ids) {
236 if (sw & 1) {
237 cpu = cpumask_next(cpu, mask);
238 if (cpu < nr_cpu_ids)
239 return cpu;
240 }
241 if (sw & 2)
242 return WORK_CPU_UNBOUND;
243 }
244 return WORK_CPU_NONE;
245}
246
247static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
248 struct workqueue_struct *wq)
249{
250 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
251}
252
253#define for_each_gcwq_cpu(cpu) \
254 for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \
255 (cpu) < WORK_CPU_NONE; \
256 (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
257
258#define for_each_online_gcwq_cpu(cpu) \
259 for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \
260 (cpu) < WORK_CPU_NONE; \
261 (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
262
263#define for_each_cwq_cpu(cpu, wq) \
264 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \
265 (cpu) < WORK_CPU_NONE; \
266 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
267
dc186ad7
TG
268#ifdef CONFIG_DEBUG_OBJECTS_WORK
269
270static struct debug_obj_descr work_debug_descr;
271
272/*
273 * fixup_init is called when:
274 * - an active object is initialized
275 */
276static int work_fixup_init(void *addr, enum debug_obj_state state)
277{
278 struct work_struct *work = addr;
279
280 switch (state) {
281 case ODEBUG_STATE_ACTIVE:
282 cancel_work_sync(work);
283 debug_object_init(work, &work_debug_descr);
284 return 1;
285 default:
286 return 0;
287 }
288}
289
290/*
291 * fixup_activate is called when:
292 * - an active object is activated
293 * - an unknown object is activated (might be a statically initialized object)
294 */
295static int work_fixup_activate(void *addr, enum debug_obj_state state)
296{
297 struct work_struct *work = addr;
298
299 switch (state) {
300
301 case ODEBUG_STATE_NOTAVAILABLE:
302 /*
303 * This is not really a fixup. The work struct was
304 * statically initialized. We just make sure that it
305 * is tracked in the object tracker.
306 */
22df02bb 307 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
dc186ad7
TG
308 debug_object_init(work, &work_debug_descr);
309 debug_object_activate(work, &work_debug_descr);
310 return 0;
311 }
312 WARN_ON_ONCE(1);
313 return 0;
314
315 case ODEBUG_STATE_ACTIVE:
316 WARN_ON(1);
317
318 default:
319 return 0;
320 }
321}
322
323/*
324 * fixup_free is called when:
325 * - an active object is freed
326 */
327static int work_fixup_free(void *addr, enum debug_obj_state state)
328{
329 struct work_struct *work = addr;
330
331 switch (state) {
332 case ODEBUG_STATE_ACTIVE:
333 cancel_work_sync(work);
334 debug_object_free(work, &work_debug_descr);
335 return 1;
336 default:
337 return 0;
338 }
339}
340
341static struct debug_obj_descr work_debug_descr = {
342 .name = "work_struct",
343 .fixup_init = work_fixup_init,
344 .fixup_activate = work_fixup_activate,
345 .fixup_free = work_fixup_free,
346};
347
348static inline void debug_work_activate(struct work_struct *work)
349{
350 debug_object_activate(work, &work_debug_descr);
351}
352
353static inline void debug_work_deactivate(struct work_struct *work)
354{
355 debug_object_deactivate(work, &work_debug_descr);
356}
357
358void __init_work(struct work_struct *work, int onstack)
359{
360 if (onstack)
361 debug_object_init_on_stack(work, &work_debug_descr);
362 else
363 debug_object_init(work, &work_debug_descr);
364}
365EXPORT_SYMBOL_GPL(__init_work);
366
367void destroy_work_on_stack(struct work_struct *work)
368{
369 debug_object_free(work, &work_debug_descr);
370}
371EXPORT_SYMBOL_GPL(destroy_work_on_stack);
372
373#else
374static inline void debug_work_activate(struct work_struct *work) { }
375static inline void debug_work_deactivate(struct work_struct *work) { }
376#endif
377
95402b38
GS
378/* Serializes the accesses to the list of workqueues. */
379static DEFINE_SPINLOCK(workqueue_lock);
1da177e4 380static LIST_HEAD(workqueues);
a0a1a5fd 381static bool workqueue_freezing; /* W: have wqs started freezing? */
c34056a3 382
e22bee78
TH
383/*
384 * The almighty global cpu workqueues. nr_running is the only field
385 * which is expected to be used frequently by other cpus via
386 * try_to_wake_up(). Put it in a separate cacheline.
387 */
8b03ae3c 388static DEFINE_PER_CPU(struct global_cwq, global_cwq);
e22bee78 389static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
8b03ae3c 390
f3421797
TH
391/*
392 * Global cpu workqueue and nr_running counter for unbound gcwq. The
393 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
394 * workers have WORKER_UNBOUND set.
395 */
396static struct global_cwq unbound_global_cwq;
397static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */
398
c34056a3 399static int worker_thread(void *__worker);
1da177e4 400
8b03ae3c
TH
401static struct global_cwq *get_gcwq(unsigned int cpu)
402{
f3421797
TH
403 if (cpu != WORK_CPU_UNBOUND)
404 return &per_cpu(global_cwq, cpu);
405 else
406 return &unbound_global_cwq;
8b03ae3c
TH
407}
408
e22bee78
TH
409static atomic_t *get_gcwq_nr_running(unsigned int cpu)
410{
f3421797
TH
411 if (cpu != WORK_CPU_UNBOUND)
412 return &per_cpu(gcwq_nr_running, cpu);
413 else
414 return &unbound_gcwq_nr_running;
e22bee78
TH
415}
416
1537663f
TH
417static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
418 struct workqueue_struct *wq)
b1f4ec17 419{
f3421797
TH
420 if (!(wq->flags & WQ_UNBOUND)) {
421 if (likely(cpu < nr_cpu_ids)) {
422#ifdef CONFIG_SMP
423 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
bdbc5dd7 424#else
f3421797 425 return wq->cpu_wq.single;
bdbc5dd7 426#endif
f3421797
TH
427 }
428 } else if (likely(cpu == WORK_CPU_UNBOUND))
429 return wq->cpu_wq.single;
430 return NULL;
b1f4ec17
ON
431}
432
73f53c4a
TH
433static unsigned int work_color_to_flags(int color)
434{
435 return color << WORK_STRUCT_COLOR_SHIFT;
436}
437
438static int get_work_color(struct work_struct *work)
439{
440 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
441 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
442}
443
444static int work_next_color(int color)
445{
446 return (color + 1) % WORK_NR_COLORS;
447}
448
4594bf15 449/*
7a22ad75
TH
450 * Work data points to the cwq while a work is on queue. Once
451 * execution starts, it points to the cpu the work was last on. This
452 * can be distinguished by comparing the data value against
453 * PAGE_OFFSET.
454 *
455 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
456 * cwq, cpu or clear work->data. These functions should only be
457 * called while the work is owned - ie. while the PENDING bit is set.
458 *
459 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
460 * corresponding to a work. gcwq is available once the work has been
461 * queued anywhere after initialization. cwq is available only from
462 * queueing until execution starts.
4594bf15 463 */
7a22ad75
TH
464static inline void set_work_data(struct work_struct *work, unsigned long data,
465 unsigned long flags)
365970a1 466{
4594bf15 467 BUG_ON(!work_pending(work));
7a22ad75
TH
468 atomic_long_set(&work->data, data | flags | work_static(work));
469}
365970a1 470
7a22ad75
TH
471static void set_work_cwq(struct work_struct *work,
472 struct cpu_workqueue_struct *cwq,
473 unsigned long extra_flags)
474{
475 set_work_data(work, (unsigned long)cwq,
476 WORK_STRUCT_PENDING | extra_flags);
365970a1
DH
477}
478
7a22ad75
TH
479static void set_work_cpu(struct work_struct *work, unsigned int cpu)
480{
481 set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
482}
483
484static void clear_work_data(struct work_struct *work)
485{
486 set_work_data(work, WORK_STRUCT_NO_CPU, 0);
487}
488
489static inline unsigned long get_work_data(struct work_struct *work)
490{
491 return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
492}
493
494static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
4d707b9f 495{
7a22ad75
TH
496 unsigned long data = get_work_data(work);
497
498 return data >= PAGE_OFFSET ? (void *)data : NULL;
4d707b9f
ON
499}
500
7a22ad75 501static struct global_cwq *get_work_gcwq(struct work_struct *work)
365970a1 502{
7a22ad75
TH
503 unsigned long data = get_work_data(work);
504 unsigned int cpu;
505
506 if (data >= PAGE_OFFSET)
507 return ((struct cpu_workqueue_struct *)data)->gcwq;
508
509 cpu = data >> WORK_STRUCT_FLAG_BITS;
bdbc5dd7 510 if (cpu == WORK_CPU_NONE)
7a22ad75
TH
511 return NULL;
512
f3421797 513 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
7a22ad75 514 return get_gcwq(cpu);
365970a1
DH
515}
516
e22bee78
TH
517/*
518 * Policy functions. These define the policies on how the global
519 * worker pool is managed. Unless noted otherwise, these functions
520 * assume that they're being called with gcwq->lock held.
521 */
522
649027d7
TH
523static bool __need_more_worker(struct global_cwq *gcwq)
524{
525 return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
526 gcwq->flags & GCWQ_HIGHPRI_PENDING;
527}
528
e22bee78
TH
529/*
530 * Need to wake up a worker? Called from anything but currently
531 * running workers.
532 */
533static bool need_more_worker(struct global_cwq *gcwq)
534{
649027d7 535 return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
e22bee78
TH
536}
537
538/* Can I start working? Called from busy but !running workers. */
539static bool may_start_working(struct global_cwq *gcwq)
540{
541 return gcwq->nr_idle;
542}
543
544/* Do I need to keep working? Called from currently running workers. */
545static bool keep_working(struct global_cwq *gcwq)
546{
547 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
548
549 return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
550}
551
552/* Do we need a new worker? Called from manager. */
553static bool need_to_create_worker(struct global_cwq *gcwq)
554{
555 return need_more_worker(gcwq) && !may_start_working(gcwq);
556}
557
558/* Do I need to be the manager? */
559static bool need_to_manage_workers(struct global_cwq *gcwq)
560{
561 return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
562}
563
564/* Do we have too many workers and should some go away? */
565static bool too_many_workers(struct global_cwq *gcwq)
566{
567 bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
568 int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
569 int nr_busy = gcwq->nr_workers - nr_idle;
570
571 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
572}
573
574/*
575 * Wake up functions.
576 */
577
7e11629d
TH
578/* Return the first worker. Safe with preemption disabled */
579static struct worker *first_worker(struct global_cwq *gcwq)
580{
581 if (unlikely(list_empty(&gcwq->idle_list)))
582 return NULL;
583
584 return list_first_entry(&gcwq->idle_list, struct worker, entry);
585}
586
587/**
588 * wake_up_worker - wake up an idle worker
589 * @gcwq: gcwq to wake worker for
590 *
591 * Wake up the first idle worker of @gcwq.
592 *
593 * CONTEXT:
594 * spin_lock_irq(gcwq->lock).
595 */
596static void wake_up_worker(struct global_cwq *gcwq)
597{
598 struct worker *worker = first_worker(gcwq);
599
600 if (likely(worker))
601 wake_up_process(worker->task);
602}
603
d302f017 604/**
e22bee78
TH
605 * wq_worker_waking_up - a worker is waking up
606 * @task: task waking up
607 * @cpu: CPU @task is waking up to
608 *
609 * This function is called during try_to_wake_up() when a worker is
610 * being awoken.
611 *
612 * CONTEXT:
613 * spin_lock_irq(rq->lock)
614 */
615void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
616{
617 struct worker *worker = kthread_data(task);
618
619 if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
620 atomic_inc(get_gcwq_nr_running(cpu));
621}
622
623/**
624 * wq_worker_sleeping - a worker is going to sleep
625 * @task: task going to sleep
626 * @cpu: CPU in question, must be the current CPU number
627 *
628 * This function is called during schedule() when a busy worker is
629 * going to sleep. Worker on the same cpu can be woken up by
630 * returning pointer to its task.
631 *
632 * CONTEXT:
633 * spin_lock_irq(rq->lock)
634 *
635 * RETURNS:
636 * Worker task on @cpu to wake up, %NULL if none.
637 */
638struct task_struct *wq_worker_sleeping(struct task_struct *task,
639 unsigned int cpu)
640{
641 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
642 struct global_cwq *gcwq = get_gcwq(cpu);
643 atomic_t *nr_running = get_gcwq_nr_running(cpu);
644
645 if (unlikely(worker->flags & WORKER_NOT_RUNNING))
646 return NULL;
647
648 /* this can only happen on the local cpu */
649 BUG_ON(cpu != raw_smp_processor_id());
650
651 /*
652 * The counterpart of the following dec_and_test, implied mb,
653 * worklist not empty test sequence is in insert_work().
654 * Please read comment there.
655 *
656 * NOT_RUNNING is clear. This means that trustee is not in
657 * charge and we're running on the local cpu w/ rq lock held
658 * and preemption disabled, which in turn means that none else
659 * could be manipulating idle_list, so dereferencing idle_list
660 * without gcwq lock is safe.
661 */
662 if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
663 to_wakeup = first_worker(gcwq);
664 return to_wakeup ? to_wakeup->task : NULL;
665}
666
667/**
668 * worker_set_flags - set worker flags and adjust nr_running accordingly
cb444766 669 * @worker: self
d302f017
TH
670 * @flags: flags to set
671 * @wakeup: wakeup an idle worker if necessary
672 *
e22bee78
TH
673 * Set @flags in @worker->flags and adjust nr_running accordingly. If
674 * nr_running becomes zero and @wakeup is %true, an idle worker is
675 * woken up.
d302f017 676 *
cb444766
TH
677 * CONTEXT:
678 * spin_lock_irq(gcwq->lock)
d302f017
TH
679 */
680static inline void worker_set_flags(struct worker *worker, unsigned int flags,
681 bool wakeup)
682{
e22bee78
TH
683 struct global_cwq *gcwq = worker->gcwq;
684
cb444766
TH
685 WARN_ON_ONCE(worker->task != current);
686
e22bee78
TH
687 /*
688 * If transitioning into NOT_RUNNING, adjust nr_running and
689 * wake up an idle worker as necessary if requested by
690 * @wakeup.
691 */
692 if ((flags & WORKER_NOT_RUNNING) &&
693 !(worker->flags & WORKER_NOT_RUNNING)) {
694 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
695
696 if (wakeup) {
697 if (atomic_dec_and_test(nr_running) &&
698 !list_empty(&gcwq->worklist))
699 wake_up_worker(gcwq);
700 } else
701 atomic_dec(nr_running);
702 }
703
d302f017
TH
704 worker->flags |= flags;
705}
706
707/**
e22bee78 708 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
cb444766 709 * @worker: self
d302f017
TH
710 * @flags: flags to clear
711 *
e22bee78 712 * Clear @flags in @worker->flags and adjust nr_running accordingly.
d302f017 713 *
cb444766
TH
714 * CONTEXT:
715 * spin_lock_irq(gcwq->lock)
d302f017
TH
716 */
717static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
718{
e22bee78
TH
719 struct global_cwq *gcwq = worker->gcwq;
720 unsigned int oflags = worker->flags;
721
cb444766
TH
722 WARN_ON_ONCE(worker->task != current);
723
d302f017 724 worker->flags &= ~flags;
e22bee78
TH
725
726 /* if transitioning out of NOT_RUNNING, increment nr_running */
727 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
728 if (!(worker->flags & WORKER_NOT_RUNNING))
729 atomic_inc(get_gcwq_nr_running(gcwq->cpu));
d302f017
TH
730}
731
c8e55f36
TH
732/**
733 * busy_worker_head - return the busy hash head for a work
734 * @gcwq: gcwq of interest
735 * @work: work to be hashed
736 *
737 * Return hash head of @gcwq for @work.
738 *
739 * CONTEXT:
740 * spin_lock_irq(gcwq->lock).
741 *
742 * RETURNS:
743 * Pointer to the hash head.
744 */
745static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
746 struct work_struct *work)
747{
748 const int base_shift = ilog2(sizeof(struct work_struct));
749 unsigned long v = (unsigned long)work;
750
751 /* simple shift and fold hash, do we need something better? */
752 v >>= base_shift;
753 v += v >> BUSY_WORKER_HASH_ORDER;
754 v &= BUSY_WORKER_HASH_MASK;
755
756 return &gcwq->busy_hash[v];
757}
758
8cca0eea
TH
759/**
760 * __find_worker_executing_work - find worker which is executing a work
761 * @gcwq: gcwq of interest
762 * @bwh: hash head as returned by busy_worker_head()
763 * @work: work to find worker for
764 *
765 * Find a worker which is executing @work on @gcwq. @bwh should be
766 * the hash head obtained by calling busy_worker_head() with the same
767 * work.
768 *
769 * CONTEXT:
770 * spin_lock_irq(gcwq->lock).
771 *
772 * RETURNS:
773 * Pointer to worker which is executing @work if found, NULL
774 * otherwise.
775 */
776static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
777 struct hlist_head *bwh,
778 struct work_struct *work)
779{
780 struct worker *worker;
781 struct hlist_node *tmp;
782
783 hlist_for_each_entry(worker, tmp, bwh, hentry)
784 if (worker->current_work == work)
785 return worker;
786 return NULL;
787}
788
789/**
790 * find_worker_executing_work - find worker which is executing a work
791 * @gcwq: gcwq of interest
792 * @work: work to find worker for
793 *
794 * Find a worker which is executing @work on @gcwq. This function is
795 * identical to __find_worker_executing_work() except that this
796 * function calculates @bwh itself.
797 *
798 * CONTEXT:
799 * spin_lock_irq(gcwq->lock).
800 *
801 * RETURNS:
802 * Pointer to worker which is executing @work if found, NULL
803 * otherwise.
804 */
805static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
806 struct work_struct *work)
807{
808 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
809 work);
810}
811
649027d7
TH
812/**
813 * gcwq_determine_ins_pos - find insertion position
814 * @gcwq: gcwq of interest
815 * @cwq: cwq a work is being queued for
816 *
817 * A work for @cwq is about to be queued on @gcwq, determine insertion
818 * position for the work. If @cwq is for HIGHPRI wq, the work is
819 * queued at the head of the queue but in FIFO order with respect to
820 * other HIGHPRI works; otherwise, at the end of the queue. This
821 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
822 * there are HIGHPRI works pending.
823 *
824 * CONTEXT:
825 * spin_lock_irq(gcwq->lock).
826 *
827 * RETURNS:
828 * Pointer to inserstion position.
829 */
830static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
831 struct cpu_workqueue_struct *cwq)
832{
833 struct work_struct *twork;
834
835 if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
836 return &gcwq->worklist;
837
838 list_for_each_entry(twork, &gcwq->worklist, entry) {
839 struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
840
841 if (!(tcwq->wq->flags & WQ_HIGHPRI))
842 break;
843 }
844
845 gcwq->flags |= GCWQ_HIGHPRI_PENDING;
846 return &twork->entry;
847}
848
4690c4ab 849/**
7e11629d 850 * insert_work - insert a work into gcwq
4690c4ab
TH
851 * @cwq: cwq @work belongs to
852 * @work: work to insert
853 * @head: insertion point
854 * @extra_flags: extra WORK_STRUCT_* flags to set
855 *
7e11629d
TH
856 * Insert @work which belongs to @cwq into @gcwq after @head.
857 * @extra_flags is or'd to work_struct flags.
4690c4ab
TH
858 *
859 * CONTEXT:
8b03ae3c 860 * spin_lock_irq(gcwq->lock).
4690c4ab 861 */
b89deed3 862static void insert_work(struct cpu_workqueue_struct *cwq,
4690c4ab
TH
863 struct work_struct *work, struct list_head *head,
864 unsigned int extra_flags)
b89deed3 865{
e22bee78
TH
866 struct global_cwq *gcwq = cwq->gcwq;
867
4690c4ab 868 /* we own @work, set data and link */
7a22ad75 869 set_work_cwq(work, cwq, extra_flags);
4690c4ab 870
6e84d644
ON
871 /*
872 * Ensure that we get the right work->data if we see the
873 * result of list_add() below, see try_to_grab_pending().
874 */
875 smp_wmb();
4690c4ab 876
1a4d9b0a 877 list_add_tail(&work->entry, head);
e22bee78
TH
878
879 /*
880 * Ensure either worker_sched_deactivated() sees the above
881 * list_add_tail() or we see zero nr_running to avoid workers
882 * lying around lazily while there are works to be processed.
883 */
884 smp_mb();
885
649027d7 886 if (__need_more_worker(gcwq))
e22bee78 887 wake_up_worker(gcwq);
b89deed3
ON
888}
889
4690c4ab 890static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1da177e4
LT
891 struct work_struct *work)
892{
502ca9d8
TH
893 struct global_cwq *gcwq;
894 struct cpu_workqueue_struct *cwq;
1e19ffc6 895 struct list_head *worklist;
1da177e4
LT
896 unsigned long flags;
897
dc186ad7 898 debug_work_activate(work);
1e19ffc6 899
c7fc77f7
TH
900 /* determine gcwq to use */
901 if (!(wq->flags & WQ_UNBOUND)) {
18aa9eff
TH
902 struct global_cwq *last_gcwq;
903
c7fc77f7
TH
904 if (unlikely(cpu == WORK_CPU_UNBOUND))
905 cpu = raw_smp_processor_id();
906
18aa9eff
TH
907 /*
908 * It's multi cpu. If @wq is non-reentrant and @work
909 * was previously on a different cpu, it might still
910 * be running there, in which case the work needs to
911 * be queued on that cpu to guarantee non-reentrance.
912 */
502ca9d8 913 gcwq = get_gcwq(cpu);
18aa9eff
TH
914 if (wq->flags & WQ_NON_REENTRANT &&
915 (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
916 struct worker *worker;
917
918 spin_lock_irqsave(&last_gcwq->lock, flags);
919
920 worker = find_worker_executing_work(last_gcwq, work);
921
922 if (worker && worker->current_cwq->wq == wq)
923 gcwq = last_gcwq;
924 else {
925 /* meh... not running there, queue here */
926 spin_unlock_irqrestore(&last_gcwq->lock, flags);
927 spin_lock_irqsave(&gcwq->lock, flags);
928 }
929 } else
930 spin_lock_irqsave(&gcwq->lock, flags);
f3421797
TH
931 } else {
932 gcwq = get_gcwq(WORK_CPU_UNBOUND);
933 spin_lock_irqsave(&gcwq->lock, flags);
502ca9d8
TH
934 }
935
936 /* gcwq determined, get cwq and queue */
937 cwq = get_cwq(gcwq->cpu, wq);
938
4690c4ab 939 BUG_ON(!list_empty(&work->entry));
1e19ffc6 940
73f53c4a 941 cwq->nr_in_flight[cwq->work_color]++;
1e19ffc6
TH
942
943 if (likely(cwq->nr_active < cwq->max_active)) {
944 cwq->nr_active++;
649027d7 945 worklist = gcwq_determine_ins_pos(gcwq, cwq);
1e19ffc6
TH
946 } else
947 worklist = &cwq->delayed_works;
948
949 insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
950
8b03ae3c 951 spin_unlock_irqrestore(&gcwq->lock, flags);
1da177e4
LT
952}
953
0fcb78c2
REB
954/**
955 * queue_work - queue work on a workqueue
956 * @wq: workqueue to use
957 * @work: work to queue
958 *
057647fc 959 * Returns 0 if @work was already on a queue, non-zero otherwise.
1da177e4 960 *
00dfcaf7
ON
961 * We queue the work to the CPU on which it was submitted, but if the CPU dies
962 * it can be processed by another CPU.
1da177e4 963 */
7ad5b3a5 964int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1da177e4 965{
ef1ca236
ON
966 int ret;
967
968 ret = queue_work_on(get_cpu(), wq, work);
969 put_cpu();
970
1da177e4
LT
971 return ret;
972}
ae90dd5d 973EXPORT_SYMBOL_GPL(queue_work);
1da177e4 974
c1a220e7
ZR
975/**
976 * queue_work_on - queue work on specific cpu
977 * @cpu: CPU number to execute work on
978 * @wq: workqueue to use
979 * @work: work to queue
980 *
981 * Returns 0 if @work was already on a queue, non-zero otherwise.
982 *
983 * We queue the work to a specific CPU, the caller must ensure it
984 * can't go away.
985 */
986int
987queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
988{
989 int ret = 0;
990
22df02bb 991 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
4690c4ab 992 __queue_work(cpu, wq, work);
c1a220e7
ZR
993 ret = 1;
994 }
995 return ret;
996}
997EXPORT_SYMBOL_GPL(queue_work_on);
998
6d141c3f 999static void delayed_work_timer_fn(unsigned long __data)
1da177e4 1000{
52bad64d 1001 struct delayed_work *dwork = (struct delayed_work *)__data;
7a22ad75 1002 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1da177e4 1003
4690c4ab 1004 __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1da177e4
LT
1005}
1006
0fcb78c2
REB
1007/**
1008 * queue_delayed_work - queue work on a workqueue after delay
1009 * @wq: workqueue to use
af9997e4 1010 * @dwork: delayable work to queue
0fcb78c2
REB
1011 * @delay: number of jiffies to wait before queueing
1012 *
057647fc 1013 * Returns 0 if @work was already on a queue, non-zero otherwise.
0fcb78c2 1014 */
7ad5b3a5 1015int queue_delayed_work(struct workqueue_struct *wq,
52bad64d 1016 struct delayed_work *dwork, unsigned long delay)
1da177e4 1017{
52bad64d 1018 if (delay == 0)
63bc0362 1019 return queue_work(wq, &dwork->work);
1da177e4 1020
63bc0362 1021 return queue_delayed_work_on(-1, wq, dwork, delay);
1da177e4 1022}
ae90dd5d 1023EXPORT_SYMBOL_GPL(queue_delayed_work);
1da177e4 1024
0fcb78c2
REB
1025/**
1026 * queue_delayed_work_on - queue work on specific CPU after delay
1027 * @cpu: CPU number to execute work on
1028 * @wq: workqueue to use
af9997e4 1029 * @dwork: work to queue
0fcb78c2
REB
1030 * @delay: number of jiffies to wait before queueing
1031 *
057647fc 1032 * Returns 0 if @work was already on a queue, non-zero otherwise.
0fcb78c2 1033 */
7a6bc1cd 1034int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
52bad64d 1035 struct delayed_work *dwork, unsigned long delay)
7a6bc1cd
VP
1036{
1037 int ret = 0;
52bad64d
DH
1038 struct timer_list *timer = &dwork->timer;
1039 struct work_struct *work = &dwork->work;
7a6bc1cd 1040
22df02bb 1041 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
c7fc77f7 1042 unsigned int lcpu;
7a22ad75 1043
7a6bc1cd
VP
1044 BUG_ON(timer_pending(timer));
1045 BUG_ON(!list_empty(&work->entry));
1046
8a3e77cc 1047 timer_stats_timer_set_start_info(&dwork->timer);
c7fc77f7 1048
7a22ad75
TH
1049 /*
1050 * This stores cwq for the moment, for the timer_fn.
1051 * Note that the work's gcwq is preserved to allow
1052 * reentrance detection for delayed works.
1053 */
c7fc77f7
TH
1054 if (!(wq->flags & WQ_UNBOUND)) {
1055 struct global_cwq *gcwq = get_work_gcwq(work);
1056
1057 if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1058 lcpu = gcwq->cpu;
1059 else
1060 lcpu = raw_smp_processor_id();
1061 } else
1062 lcpu = WORK_CPU_UNBOUND;
1063
7a22ad75 1064 set_work_cwq(work, get_cwq(lcpu, wq), 0);
c7fc77f7 1065
7a6bc1cd 1066 timer->expires = jiffies + delay;
52bad64d 1067 timer->data = (unsigned long)dwork;
7a6bc1cd 1068 timer->function = delayed_work_timer_fn;
63bc0362
ON
1069
1070 if (unlikely(cpu >= 0))
1071 add_timer_on(timer, cpu);
1072 else
1073 add_timer(timer);
7a6bc1cd
VP
1074 ret = 1;
1075 }
1076 return ret;
1077}
ae90dd5d 1078EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1da177e4 1079
c8e55f36
TH
1080/**
1081 * worker_enter_idle - enter idle state
1082 * @worker: worker which is entering idle state
1083 *
1084 * @worker is entering idle state. Update stats and idle timer if
1085 * necessary.
1086 *
1087 * LOCKING:
1088 * spin_lock_irq(gcwq->lock).
1089 */
1090static void worker_enter_idle(struct worker *worker)
1091{
1092 struct global_cwq *gcwq = worker->gcwq;
1093
1094 BUG_ON(worker->flags & WORKER_IDLE);
1095 BUG_ON(!list_empty(&worker->entry) &&
1096 (worker->hentry.next || worker->hentry.pprev));
1097
cb444766
TH
1098 /* can't use worker_set_flags(), also called from start_worker() */
1099 worker->flags |= WORKER_IDLE;
c8e55f36 1100 gcwq->nr_idle++;
e22bee78 1101 worker->last_active = jiffies;
c8e55f36
TH
1102
1103 /* idle_list is LIFO */
1104 list_add(&worker->entry, &gcwq->idle_list);
db7bccf4 1105
e22bee78
TH
1106 if (likely(!(worker->flags & WORKER_ROGUE))) {
1107 if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1108 mod_timer(&gcwq->idle_timer,
1109 jiffies + IDLE_WORKER_TIMEOUT);
1110 } else
db7bccf4 1111 wake_up_all(&gcwq->trustee_wait);
cb444766
TH
1112
1113 /* sanity check nr_running */
1114 WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1115 atomic_read(get_gcwq_nr_running(gcwq->cpu)));
c8e55f36
TH
1116}
1117
1118/**
1119 * worker_leave_idle - leave idle state
1120 * @worker: worker which is leaving idle state
1121 *
1122 * @worker is leaving idle state. Update stats.
1123 *
1124 * LOCKING:
1125 * spin_lock_irq(gcwq->lock).
1126 */
1127static void worker_leave_idle(struct worker *worker)
1128{
1129 struct global_cwq *gcwq = worker->gcwq;
1130
1131 BUG_ON(!(worker->flags & WORKER_IDLE));
d302f017 1132 worker_clr_flags(worker, WORKER_IDLE);
c8e55f36
TH
1133 gcwq->nr_idle--;
1134 list_del_init(&worker->entry);
1135}
1136
e22bee78
TH
1137/**
1138 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1139 * @worker: self
1140 *
1141 * Works which are scheduled while the cpu is online must at least be
1142 * scheduled to a worker which is bound to the cpu so that if they are
1143 * flushed from cpu callbacks while cpu is going down, they are
1144 * guaranteed to execute on the cpu.
1145 *
1146 * This function is to be used by rogue workers and rescuers to bind
1147 * themselves to the target cpu and may race with cpu going down or
1148 * coming online. kthread_bind() can't be used because it may put the
1149 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1150 * verbatim as it's best effort and blocking and gcwq may be
1151 * [dis]associated in the meantime.
1152 *
1153 * This function tries set_cpus_allowed() and locks gcwq and verifies
1154 * the binding against GCWQ_DISASSOCIATED which is set during
1155 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1156 * idle state or fetches works without dropping lock, it can guarantee
1157 * the scheduling requirement described in the first paragraph.
1158 *
1159 * CONTEXT:
1160 * Might sleep. Called without any lock but returns with gcwq->lock
1161 * held.
1162 *
1163 * RETURNS:
1164 * %true if the associated gcwq is online (@worker is successfully
1165 * bound), %false if offline.
1166 */
1167static bool worker_maybe_bind_and_lock(struct worker *worker)
1168{
1169 struct global_cwq *gcwq = worker->gcwq;
1170 struct task_struct *task = worker->task;
1171
1172 while (true) {
1173 /*
1174 * The following call may fail, succeed or succeed
1175 * without actually migrating the task to the cpu if
1176 * it races with cpu hotunplug operation. Verify
1177 * against GCWQ_DISASSOCIATED.
1178 */
f3421797
TH
1179 if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1180 set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
e22bee78
TH
1181
1182 spin_lock_irq(&gcwq->lock);
1183 if (gcwq->flags & GCWQ_DISASSOCIATED)
1184 return false;
1185 if (task_cpu(task) == gcwq->cpu &&
1186 cpumask_equal(&current->cpus_allowed,
1187 get_cpu_mask(gcwq->cpu)))
1188 return true;
1189 spin_unlock_irq(&gcwq->lock);
1190
1191 /* CPU has come up inbetween, retry migration */
1192 cpu_relax();
1193 }
1194}
1195
1196/*
1197 * Function for worker->rebind_work used to rebind rogue busy workers
1198 * to the associated cpu which is coming back online. This is
1199 * scheduled by cpu up but can race with other cpu hotplug operations
1200 * and may be executed twice without intervening cpu down.
1201 */
1202static void worker_rebind_fn(struct work_struct *work)
1203{
1204 struct worker *worker = container_of(work, struct worker, rebind_work);
1205 struct global_cwq *gcwq = worker->gcwq;
1206
1207 if (worker_maybe_bind_and_lock(worker))
1208 worker_clr_flags(worker, WORKER_REBIND);
1209
1210 spin_unlock_irq(&gcwq->lock);
1211}
1212
c34056a3
TH
1213static struct worker *alloc_worker(void)
1214{
1215 struct worker *worker;
1216
1217 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
c8e55f36
TH
1218 if (worker) {
1219 INIT_LIST_HEAD(&worker->entry);
affee4b2 1220 INIT_LIST_HEAD(&worker->scheduled);
e22bee78
TH
1221 INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1222 /* on creation a worker is in !idle && prep state */
1223 worker->flags = WORKER_PREP;
c8e55f36 1224 }
c34056a3
TH
1225 return worker;
1226}
1227
1228/**
1229 * create_worker - create a new workqueue worker
7e11629d 1230 * @gcwq: gcwq the new worker will belong to
c34056a3
TH
1231 * @bind: whether to set affinity to @cpu or not
1232 *
7e11629d 1233 * Create a new worker which is bound to @gcwq. The returned worker
c34056a3
TH
1234 * can be started by calling start_worker() or destroyed using
1235 * destroy_worker().
1236 *
1237 * CONTEXT:
1238 * Might sleep. Does GFP_KERNEL allocations.
1239 *
1240 * RETURNS:
1241 * Pointer to the newly created worker.
1242 */
7e11629d 1243static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
c34056a3 1244{
f3421797 1245 bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
c34056a3 1246 struct worker *worker = NULL;
f3421797 1247 int id = -1;
c34056a3 1248
8b03ae3c
TH
1249 spin_lock_irq(&gcwq->lock);
1250 while (ida_get_new(&gcwq->worker_ida, &id)) {
1251 spin_unlock_irq(&gcwq->lock);
1252 if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
c34056a3 1253 goto fail;
8b03ae3c 1254 spin_lock_irq(&gcwq->lock);
c34056a3 1255 }
8b03ae3c 1256 spin_unlock_irq(&gcwq->lock);
c34056a3
TH
1257
1258 worker = alloc_worker();
1259 if (!worker)
1260 goto fail;
1261
8b03ae3c 1262 worker->gcwq = gcwq;
c34056a3
TH
1263 worker->id = id;
1264
f3421797
TH
1265 if (!on_unbound_cpu)
1266 worker->task = kthread_create(worker_thread, worker,
1267 "kworker/%u:%d", gcwq->cpu, id);
1268 else
1269 worker->task = kthread_create(worker_thread, worker,
1270 "kworker/u:%d", id);
c34056a3
TH
1271 if (IS_ERR(worker->task))
1272 goto fail;
1273
db7bccf4
TH
1274 /*
1275 * A rogue worker will become a regular one if CPU comes
1276 * online later on. Make sure every worker has
1277 * PF_THREAD_BOUND set.
1278 */
f3421797 1279 if (bind && !on_unbound_cpu)
8b03ae3c 1280 kthread_bind(worker->task, gcwq->cpu);
f3421797 1281 else {
db7bccf4 1282 worker->task->flags |= PF_THREAD_BOUND;
f3421797
TH
1283 if (on_unbound_cpu)
1284 worker->flags |= WORKER_UNBOUND;
1285 }
c34056a3
TH
1286
1287 return worker;
1288fail:
1289 if (id >= 0) {
8b03ae3c
TH
1290 spin_lock_irq(&gcwq->lock);
1291 ida_remove(&gcwq->worker_ida, id);
1292 spin_unlock_irq(&gcwq->lock);
c34056a3
TH
1293 }
1294 kfree(worker);
1295 return NULL;
1296}
1297
1298/**
1299 * start_worker - start a newly created worker
1300 * @worker: worker to start
1301 *
c8e55f36 1302 * Make the gcwq aware of @worker and start it.
c34056a3
TH
1303 *
1304 * CONTEXT:
8b03ae3c 1305 * spin_lock_irq(gcwq->lock).
c34056a3
TH
1306 */
1307static void start_worker(struct worker *worker)
1308{
cb444766 1309 worker->flags |= WORKER_STARTED;
c8e55f36
TH
1310 worker->gcwq->nr_workers++;
1311 worker_enter_idle(worker);
c34056a3
TH
1312 wake_up_process(worker->task);
1313}
1314
1315/**
1316 * destroy_worker - destroy a workqueue worker
1317 * @worker: worker to be destroyed
1318 *
c8e55f36
TH
1319 * Destroy @worker and adjust @gcwq stats accordingly.
1320 *
1321 * CONTEXT:
1322 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
c34056a3
TH
1323 */
1324static void destroy_worker(struct worker *worker)
1325{
8b03ae3c 1326 struct global_cwq *gcwq = worker->gcwq;
c34056a3
TH
1327 int id = worker->id;
1328
1329 /* sanity check frenzy */
1330 BUG_ON(worker->current_work);
affee4b2 1331 BUG_ON(!list_empty(&worker->scheduled));
c34056a3 1332
c8e55f36
TH
1333 if (worker->flags & WORKER_STARTED)
1334 gcwq->nr_workers--;
1335 if (worker->flags & WORKER_IDLE)
1336 gcwq->nr_idle--;
1337
1338 list_del_init(&worker->entry);
cb444766 1339 worker->flags |= WORKER_DIE;
c8e55f36
TH
1340
1341 spin_unlock_irq(&gcwq->lock);
1342
c34056a3
TH
1343 kthread_stop(worker->task);
1344 kfree(worker);
1345
8b03ae3c
TH
1346 spin_lock_irq(&gcwq->lock);
1347 ida_remove(&gcwq->worker_ida, id);
c34056a3
TH
1348}
1349
e22bee78
TH
1350static void idle_worker_timeout(unsigned long __gcwq)
1351{
1352 struct global_cwq *gcwq = (void *)__gcwq;
1353
1354 spin_lock_irq(&gcwq->lock);
1355
1356 if (too_many_workers(gcwq)) {
1357 struct worker *worker;
1358 unsigned long expires;
1359
1360 /* idle_list is kept in LIFO order, check the last one */
1361 worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1362 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1363
1364 if (time_before(jiffies, expires))
1365 mod_timer(&gcwq->idle_timer, expires);
1366 else {
1367 /* it's been idle for too long, wake up manager */
1368 gcwq->flags |= GCWQ_MANAGE_WORKERS;
1369 wake_up_worker(gcwq);
1370 }
1371 }
1372
1373 spin_unlock_irq(&gcwq->lock);
1374}
1375
1376static bool send_mayday(struct work_struct *work)
1377{
1378 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1379 struct workqueue_struct *wq = cwq->wq;
f3421797 1380 unsigned int cpu;
e22bee78
TH
1381
1382 if (!(wq->flags & WQ_RESCUER))
1383 return false;
1384
1385 /* mayday mayday mayday */
f3421797
TH
1386 cpu = cwq->gcwq->cpu;
1387 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1388 if (cpu == WORK_CPU_UNBOUND)
1389 cpu = 0;
1390 if (!cpumask_test_and_set_cpu(cpu, wq->mayday_mask))
e22bee78
TH
1391 wake_up_process(wq->rescuer->task);
1392 return true;
1393}
1394
1395static void gcwq_mayday_timeout(unsigned long __gcwq)
1396{
1397 struct global_cwq *gcwq = (void *)__gcwq;
1398 struct work_struct *work;
1399
1400 spin_lock_irq(&gcwq->lock);
1401
1402 if (need_to_create_worker(gcwq)) {
1403 /*
1404 * We've been trying to create a new worker but
1405 * haven't been successful. We might be hitting an
1406 * allocation deadlock. Send distress signals to
1407 * rescuers.
1408 */
1409 list_for_each_entry(work, &gcwq->worklist, entry)
1410 send_mayday(work);
1411 }
1412
1413 spin_unlock_irq(&gcwq->lock);
1414
1415 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1416}
1417
1418/**
1419 * maybe_create_worker - create a new worker if necessary
1420 * @gcwq: gcwq to create a new worker for
1421 *
1422 * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to
1423 * have at least one idle worker on return from this function. If
1424 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1425 * sent to all rescuers with works scheduled on @gcwq to resolve
1426 * possible allocation deadlock.
1427 *
1428 * On return, need_to_create_worker() is guaranteed to be false and
1429 * may_start_working() true.
1430 *
1431 * LOCKING:
1432 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1433 * multiple times. Does GFP_KERNEL allocations. Called only from
1434 * manager.
1435 *
1436 * RETURNS:
1437 * false if no action was taken and gcwq->lock stayed locked, true
1438 * otherwise.
1439 */
1440static bool maybe_create_worker(struct global_cwq *gcwq)
1441{
1442 if (!need_to_create_worker(gcwq))
1443 return false;
1444restart:
9f9c2364
TH
1445 spin_unlock_irq(&gcwq->lock);
1446
e22bee78
TH
1447 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1448 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1449
1450 while (true) {
1451 struct worker *worker;
1452
e22bee78
TH
1453 worker = create_worker(gcwq, true);
1454 if (worker) {
1455 del_timer_sync(&gcwq->mayday_timer);
1456 spin_lock_irq(&gcwq->lock);
1457 start_worker(worker);
1458 BUG_ON(need_to_create_worker(gcwq));
1459 return true;
1460 }
1461
1462 if (!need_to_create_worker(gcwq))
1463 break;
1464
e22bee78
TH
1465 __set_current_state(TASK_INTERRUPTIBLE);
1466 schedule_timeout(CREATE_COOLDOWN);
9f9c2364 1467
e22bee78
TH
1468 if (!need_to_create_worker(gcwq))
1469 break;
1470 }
1471
e22bee78
TH
1472 del_timer_sync(&gcwq->mayday_timer);
1473 spin_lock_irq(&gcwq->lock);
1474 if (need_to_create_worker(gcwq))
1475 goto restart;
1476 return true;
1477}
1478
1479/**
1480 * maybe_destroy_worker - destroy workers which have been idle for a while
1481 * @gcwq: gcwq to destroy workers for
1482 *
1483 * Destroy @gcwq workers which have been idle for longer than
1484 * IDLE_WORKER_TIMEOUT.
1485 *
1486 * LOCKING:
1487 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1488 * multiple times. Called only from manager.
1489 *
1490 * RETURNS:
1491 * false if no action was taken and gcwq->lock stayed locked, true
1492 * otherwise.
1493 */
1494static bool maybe_destroy_workers(struct global_cwq *gcwq)
1495{
1496 bool ret = false;
1497
1498 while (too_many_workers(gcwq)) {
1499 struct worker *worker;
1500 unsigned long expires;
1501
1502 worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1503 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1504
1505 if (time_before(jiffies, expires)) {
1506 mod_timer(&gcwq->idle_timer, expires);
1507 break;
1508 }
1509
1510 destroy_worker(worker);
1511 ret = true;
1512 }
1513
1514 return ret;
1515}
1516
1517/**
1518 * manage_workers - manage worker pool
1519 * @worker: self
1520 *
1521 * Assume the manager role and manage gcwq worker pool @worker belongs
1522 * to. At any given time, there can be only zero or one manager per
1523 * gcwq. The exclusion is handled automatically by this function.
1524 *
1525 * The caller can safely start processing works on false return. On
1526 * true return, it's guaranteed that need_to_create_worker() is false
1527 * and may_start_working() is true.
1528 *
1529 * CONTEXT:
1530 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1531 * multiple times. Does GFP_KERNEL allocations.
1532 *
1533 * RETURNS:
1534 * false if no action was taken and gcwq->lock stayed locked, true if
1535 * some action was taken.
1536 */
1537static bool manage_workers(struct worker *worker)
1538{
1539 struct global_cwq *gcwq = worker->gcwq;
1540 bool ret = false;
1541
1542 if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1543 return ret;
1544
1545 gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1546 gcwq->flags |= GCWQ_MANAGING_WORKERS;
1547
1548 /*
1549 * Destroy and then create so that may_start_working() is true
1550 * on return.
1551 */
1552 ret |= maybe_destroy_workers(gcwq);
1553 ret |= maybe_create_worker(gcwq);
1554
1555 gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1556
1557 /*
1558 * The trustee might be waiting to take over the manager
1559 * position, tell it we're done.
1560 */
1561 if (unlikely(gcwq->trustee))
1562 wake_up_all(&gcwq->trustee_wait);
1563
1564 return ret;
1565}
1566
affee4b2
TH
1567/**
1568 * move_linked_works - move linked works to a list
1569 * @work: start of series of works to be scheduled
1570 * @head: target list to append @work to
1571 * @nextp: out paramter for nested worklist walking
1572 *
1573 * Schedule linked works starting from @work to @head. Work series to
1574 * be scheduled starts at @work and includes any consecutive work with
1575 * WORK_STRUCT_LINKED set in its predecessor.
1576 *
1577 * If @nextp is not NULL, it's updated to point to the next work of
1578 * the last scheduled work. This allows move_linked_works() to be
1579 * nested inside outer list_for_each_entry_safe().
1580 *
1581 * CONTEXT:
8b03ae3c 1582 * spin_lock_irq(gcwq->lock).
affee4b2
TH
1583 */
1584static void move_linked_works(struct work_struct *work, struct list_head *head,
1585 struct work_struct **nextp)
1586{
1587 struct work_struct *n;
1588
1589 /*
1590 * Linked worklist will always end before the end of the list,
1591 * use NULL for list head.
1592 */
1593 list_for_each_entry_safe_from(work, n, NULL, entry) {
1594 list_move_tail(&work->entry, head);
1595 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1596 break;
1597 }
1598
1599 /*
1600 * If we're already inside safe list traversal and have moved
1601 * multiple works to the scheduled queue, the next position
1602 * needs to be updated.
1603 */
1604 if (nextp)
1605 *nextp = n;
1606}
1607
1e19ffc6
TH
1608static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1609{
1610 struct work_struct *work = list_first_entry(&cwq->delayed_works,
1611 struct work_struct, entry);
649027d7 1612 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1e19ffc6 1613
649027d7 1614 move_linked_works(work, pos, NULL);
1e19ffc6
TH
1615 cwq->nr_active++;
1616}
1617
73f53c4a
TH
1618/**
1619 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1620 * @cwq: cwq of interest
1621 * @color: color of work which left the queue
1622 *
1623 * A work either has completed or is removed from pending queue,
1624 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1625 *
1626 * CONTEXT:
8b03ae3c 1627 * spin_lock_irq(gcwq->lock).
73f53c4a
TH
1628 */
1629static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
1630{
1631 /* ignore uncolored works */
1632 if (color == WORK_NO_COLOR)
1633 return;
1634
1635 cwq->nr_in_flight[color]--;
1e19ffc6
TH
1636 cwq->nr_active--;
1637
502ca9d8
TH
1638 if (!list_empty(&cwq->delayed_works)) {
1639 /* one down, submit a delayed one */
1640 if (cwq->nr_active < cwq->max_active)
1641 cwq_activate_first_delayed(cwq);
502ca9d8 1642 }
73f53c4a
TH
1643
1644 /* is flush in progress and are we at the flushing tip? */
1645 if (likely(cwq->flush_color != color))
1646 return;
1647
1648 /* are there still in-flight works? */
1649 if (cwq->nr_in_flight[color])
1650 return;
1651
1652 /* this cwq is done, clear flush_color */
1653 cwq->flush_color = -1;
1654
1655 /*
1656 * If this was the last cwq, wake up the first flusher. It
1657 * will handle the rest.
1658 */
1659 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1660 complete(&cwq->wq->first_flusher->done);
1661}
1662
a62428c0
TH
1663/**
1664 * process_one_work - process single work
c34056a3 1665 * @worker: self
a62428c0
TH
1666 * @work: work to process
1667 *
1668 * Process @work. This function contains all the logics necessary to
1669 * process a single work including synchronization against and
1670 * interaction with other workers on the same cpu, queueing and
1671 * flushing. As long as context requirement is met, any worker can
1672 * call this function to process a work.
1673 *
1674 * CONTEXT:
8b03ae3c 1675 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
a62428c0 1676 */
c34056a3 1677static void process_one_work(struct worker *worker, struct work_struct *work)
a62428c0 1678{
7e11629d 1679 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
8b03ae3c 1680 struct global_cwq *gcwq = cwq->gcwq;
c8e55f36 1681 struct hlist_head *bwh = busy_worker_head(gcwq, work);
fb0e7beb 1682 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
a62428c0 1683 work_func_t f = work->func;
73f53c4a 1684 int work_color;
7e11629d 1685 struct worker *collision;
a62428c0
TH
1686#ifdef CONFIG_LOCKDEP
1687 /*
1688 * It is permissible to free the struct work_struct from
1689 * inside the function that is called from it, this we need to
1690 * take into account for lockdep too. To avoid bogus "held
1691 * lock freed" warnings as well as problems when looking into
1692 * work->lockdep_map, make a copy and use that here.
1693 */
1694 struct lockdep_map lockdep_map = work->lockdep_map;
1695#endif
7e11629d
TH
1696 /*
1697 * A single work shouldn't be executed concurrently by
1698 * multiple workers on a single cpu. Check whether anyone is
1699 * already processing the work. If so, defer the work to the
1700 * currently executing one.
1701 */
1702 collision = __find_worker_executing_work(gcwq, bwh, work);
1703 if (unlikely(collision)) {
1704 move_linked_works(work, &collision->scheduled, NULL);
1705 return;
1706 }
1707
a62428c0 1708 /* claim and process */
a62428c0 1709 debug_work_deactivate(work);
c8e55f36 1710 hlist_add_head(&worker->hentry, bwh);
c34056a3 1711 worker->current_work = work;
8cca0eea 1712 worker->current_cwq = cwq;
73f53c4a 1713 work_color = get_work_color(work);
7a22ad75 1714
7a22ad75
TH
1715 /* record the current cpu number in the work data and dequeue */
1716 set_work_cpu(work, gcwq->cpu);
1717 list_del_init(&work->entry);
1718
1719 /*
1720 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1721 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1722 */
1723 if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1724 struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1725 struct work_struct, entry);
1726
1727 if (!list_empty(&gcwq->worklist) &&
1728 get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1729 wake_up_worker(gcwq);
1730 else
1731 gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1732 }
1733
1734 /*
1735 * CPU intensive works don't participate in concurrency
1736 * management. They're the scheduler's responsibility.
1737 */
1738 if (unlikely(cpu_intensive))
1739 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1740
8b03ae3c 1741 spin_unlock_irq(&gcwq->lock);
a62428c0 1742
1743 work_clear_pending(work);
1744 lock_map_acquire(&cwq->wq->lockdep_map);
1745 lock_map_acquire(&lockdep_map);
1746 f(work);
1747 lock_map_release(&lockdep_map);
1748 lock_map_release(&cwq->wq->lockdep_map);
1749
1750 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1751 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1752 "%s/0x%08x/%d\n",
1753 current->comm, preempt_count(), task_pid_nr(current));
1754 printk(KERN_ERR " last function: ");
1755 print_symbol("%s\n", (unsigned long)f);
1756 debug_show_held_locks(current);
1757 dump_stack();
1758 }
1759
8b03ae3c 1760 spin_lock_irq(&gcwq->lock);
a62428c0 1761
1762 /* clear cpu intensive status */
1763 if (unlikely(cpu_intensive))
1764 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1765
a62428c0 1766 /* we're done with it, release */
c8e55f36 1767 hlist_del_init(&worker->hentry);
c34056a3 1768 worker->current_work = NULL;
8cca0eea 1769 worker->current_cwq = NULL;
73f53c4a 1770 cwq_dec_nr_in_flight(cwq, work_color);
1771}
1772
1773/**
1774 * process_scheduled_works - process scheduled works
1775 * @worker: self
1776 *
1777 * Process all scheduled works. Please note that the scheduled list
1778 * may change while processing a work, so this function repeatedly
1779 * fetches a work from the top and executes it.
1780 *
1781 * CONTEXT:
8b03ae3c 1782 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1783 * multiple times.
1784 */
1785static void process_scheduled_works(struct worker *worker)
1da177e4 1786{
1787 while (!list_empty(&worker->scheduled)) {
1788 struct work_struct *work = list_first_entry(&worker->scheduled,
1da177e4 1789 struct work_struct, entry);
c34056a3 1790 process_one_work(worker, work);
1da177e4 1791 }
1792}
1793
1794/**
1795 * worker_thread - the worker thread function
c34056a3 1796 * @__worker: self
4690c4ab 1797 *
1798 * The gcwq worker thread function. There's a single dynamic pool of
1799 * these per each cpu. These workers process all works regardless of
1800 * their specific target workqueue. The only exception is works which
 1801 * belong to workqueues with a rescuer, which will be explained in
1802 * rescuer_thread().
4690c4ab 1803 */
c34056a3 1804static int worker_thread(void *__worker)
1da177e4 1805{
c34056a3 1806 struct worker *worker = __worker;
8b03ae3c 1807 struct global_cwq *gcwq = worker->gcwq;
1da177e4 1808
1809 /* tell the scheduler that this is a workqueue worker */
1810 worker->task->flags |= PF_WQ_WORKER;
c8e55f36 1811woke_up:
c8e55f36 1812 spin_lock_irq(&gcwq->lock);
1da177e4 1813
1814 /* DIE can be set only while we're idle, checking here is enough */
1815 if (worker->flags & WORKER_DIE) {
1816 spin_unlock_irq(&gcwq->lock);
e22bee78 1817 worker->task->flags &= ~PF_WQ_WORKER;
1818 return 0;
1819 }
affee4b2 1820
c8e55f36 1821 worker_leave_idle(worker);
db7bccf4 1822recheck:
1823 /* no more worker necessary? */
1824 if (!need_more_worker(gcwq))
1825 goto sleep;
1826
1827 /* do we need to manage? */
1828 if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1829 goto recheck;
1830
1831 /*
1832 * ->scheduled list can only be filled while a worker is
1833 * preparing to process a work or actually processing it.
1834 * Make sure nobody diddled with it while I was sleeping.
1835 */
1836 BUG_ON(!list_empty(&worker->scheduled));
1837
1838 /*
1839 * When control reaches this point, we're guaranteed to have
1840 * at least one idle worker or that someone else has already
1841 * assumed the manager role.
1842 */
1843 worker_clr_flags(worker, WORKER_PREP);
1844
1845 do {
c8e55f36 1846 struct work_struct *work =
7e11629d 1847 list_first_entry(&gcwq->worklist,
1848 struct work_struct, entry);
1849
1850 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1851 /* optimization path, not strictly necessary */
1852 process_one_work(worker, work);
1853 if (unlikely(!list_empty(&worker->scheduled)))
affee4b2 1854 process_scheduled_works(worker);
1855 } else {
1856 move_linked_works(work, &worker->scheduled, NULL);
1857 process_scheduled_works(worker);
affee4b2 1858 }
1859 } while (keep_working(gcwq));
1860
1861 worker_set_flags(worker, WORKER_PREP, false);
d313dd85 1862sleep:
1863 if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1864 goto recheck;
d313dd85 1865
c8e55f36 1866 /*
1867 * gcwq->lock is held and there's no work to process and no
1868 * need to manage, sleep. Workers are woken up only while
1869 * holding gcwq->lock or from local cpu, so setting the
1870 * current state before releasing gcwq->lock is enough to
1871 * prevent losing any event.
1872 */
1873 worker_enter_idle(worker);
1874 __set_current_state(TASK_INTERRUPTIBLE);
1875 spin_unlock_irq(&gcwq->lock);
1876 schedule();
1877 goto woke_up;
1878}
1879
1880/**
1881 * rescuer_thread - the rescuer thread function
1882 * @__wq: the associated workqueue
1883 *
1884 * Workqueue rescuer thread function. There's one rescuer for each
1885 * workqueue which has WQ_RESCUER set.
1886 *
 1887 * Regular work processing on a gcwq may block trying to create a new
 1888 * worker, which uses a GFP_KERNEL allocation and thus has a slight
 1889 * chance of developing into a deadlock if some works currently on the
 1890 * same queue need to be processed to satisfy that GFP_KERNEL
 1891 * allocation. This is the problem the rescuer solves.
 1892 *
 1893 * When such a condition is possible, the gcwq summons the rescuers of
 1894 * all workqueues which have works queued on the gcwq and lets them
 1895 * process those works so that forward progress can be guaranteed.
1896 *
1897 * This should happen rarely.
1898 */
1899static int rescuer_thread(void *__wq)
1900{
1901 struct workqueue_struct *wq = __wq;
1902 struct worker *rescuer = wq->rescuer;
1903 struct list_head *scheduled = &rescuer->scheduled;
f3421797 1904 bool is_unbound = wq->flags & WQ_UNBOUND;
1905 unsigned int cpu;
1906
1907 set_user_nice(current, RESCUER_NICE_LEVEL);
1908repeat:
1909 set_current_state(TASK_INTERRUPTIBLE);
1910
1911 if (kthread_should_stop())
1912 return 0;
1913
1914 /*
 1915 * See whether any cpu is asking for help. Unbound
 1916 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
1917 */
e22bee78 1918 for_each_cpu(cpu, wq->mayday_mask) {
1919 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
1920 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
1921 struct global_cwq *gcwq = cwq->gcwq;
1922 struct work_struct *work, *n;
1923
1924 __set_current_state(TASK_RUNNING);
1925 cpumask_clear_cpu(cpu, wq->mayday_mask);
1926
1927 /* migrate to the target cpu if possible */
1928 rescuer->gcwq = gcwq;
1929 worker_maybe_bind_and_lock(rescuer);
1930
1931 /*
1932 * Slurp in all works issued via this workqueue and
1933 * process'em.
1934 */
1935 BUG_ON(!list_empty(&rescuer->scheduled));
1936 list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
1937 if (get_work_cwq(work) == cwq)
1938 move_linked_works(work, scheduled, &n);
1939
1940 process_scheduled_works(rescuer);
1941 spin_unlock_irq(&gcwq->lock);
1942 }
1943
1944 schedule();
1945 goto repeat;
1946}
1947
1948struct wq_barrier {
1949 struct work_struct work;
1950 struct completion done;
1951};
1952
1953static void wq_barrier_func(struct work_struct *work)
1954{
1955 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
1956 complete(&barr->done);
1957}
1958
1959/**
1960 * insert_wq_barrier - insert a barrier work
1961 * @cwq: cwq to insert barrier into
1962 * @barr: wq_barrier to insert
1963 * @target: target work to attach @barr to
1964 * @worker: worker currently executing @target, NULL if @target is not executing
4690c4ab 1965 *
1966 * @barr is linked to @target such that @barr is completed only after
1967 * @target finishes execution. Please note that the ordering
1968 * guarantee is observed only with respect to @target and on the local
1969 * cpu.
1970 *
1971 * Currently, a queued barrier can't be canceled. This is because
1972 * try_to_grab_pending() can't determine whether the work to be
1973 * grabbed is at the head of the queue and thus can't clear LINKED
1974 * flag of the previous work while there must be a valid next work
1975 * after a work with LINKED flag set.
1976 *
1977 * Note that when @worker is non-NULL, @target may be modified
1978 * underneath us, so we can't reliably determine cwq from @target.
1979 *
1980 * CONTEXT:
8b03ae3c 1981 * spin_lock_irq(gcwq->lock).
4690c4ab 1982 */
83c22520 1983static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1984 struct wq_barrier *barr,
1985 struct work_struct *target, struct worker *worker)
fc2e4d70 1986{
1987 struct list_head *head;
1988 unsigned int linked = 0;
1989
dc186ad7 1990 /*
8b03ae3c 1991 * debugobject calls are safe here even with gcwq->lock locked
1992 * as we know for sure that this will not trigger any of the
1993 * checks and call back into the fixup functions where we
1994 * might deadlock.
1995 */
1996 INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
22df02bb 1997 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
fc2e4d70 1998 init_completion(&barr->done);
83c22520 1999
2000 /*
2001 * If @target is currently being executed, schedule the
2002 * barrier to the worker; otherwise, put it after @target.
2003 */
2004 if (worker)
2005 head = worker->scheduled.next;
2006 else {
2007 unsigned long *bits = work_data_bits(target);
2008
2009 head = target->entry.next;
2010 /* there can already be other linked works, inherit and set */
2011 linked = *bits & WORK_STRUCT_LINKED;
2012 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2013 }
2014
dc186ad7 2015 debug_work_activate(&barr->work);
2016 insert_work(cwq, &barr->work, head,
2017 work_color_to_flags(WORK_NO_COLOR) | linked);
2018}
2019
2020/**
2021 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2022 * @wq: workqueue being flushed
2023 * @flush_color: new flush color, < 0 for no-op
2024 * @work_color: new work color, < 0 for no-op
2025 *
2026 * Prepare cwqs for workqueue flushing.
2027 *
2028 * If @flush_color is non-negative, flush_color on all cwqs should be
2029 * -1. If no cwq has in-flight commands at the specified color, all
2030 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
2031 * has in flight commands, its cwq->flush_color is set to
2032 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2033 * wakeup logic is armed and %true is returned.
2034 *
2035 * The caller should have initialized @wq->first_flusher prior to
2036 * calling this function with non-negative @flush_color. If
2037 * @flush_color is negative, no flush color update is done and %false
2038 * is returned.
2039 *
2040 * If @work_color is non-negative, all cwqs should have the same
2041 * work_color which is previous to @work_color and all will be
2042 * advanced to @work_color.
2043 *
2044 * CONTEXT:
2045 * mutex_lock(wq->flush_mutex).
2046 *
2047 * RETURNS:
2048 * %true if @flush_color >= 0 and there's something to flush. %false
2049 * otherwise.
2050 */
2051static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2052 int flush_color, int work_color)
1da177e4 2053{
2054 bool wait = false;
2055 unsigned int cpu;
1da177e4 2056
2057 if (flush_color >= 0) {
2058 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2059 atomic_set(&wq->nr_cwqs_to_flush, 1);
1da177e4 2060 }
2355b70f 2061
f3421797 2062 for_each_cwq_cpu(cpu, wq) {
73f53c4a 2063 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
8b03ae3c 2064 struct global_cwq *gcwq = cwq->gcwq;
73f53c4a 2065
8b03ae3c 2066 spin_lock_irq(&gcwq->lock);
2067
2068 if (flush_color >= 0) {
2069 BUG_ON(cwq->flush_color != -1);
2070
2071 if (cwq->nr_in_flight[flush_color]) {
2072 cwq->flush_color = flush_color;
2073 atomic_inc(&wq->nr_cwqs_to_flush);
2074 wait = true;
2075 }
2076 }
2077
2078 if (work_color >= 0) {
2079 BUG_ON(work_color != work_next_color(cwq->work_color));
2080 cwq->work_color = work_color;
2081 }
2082
8b03ae3c 2083 spin_unlock_irq(&gcwq->lock);
dc186ad7 2084 }
14441960 2085
2086 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2087 complete(&wq->first_flusher->done);
2088
2089 return wait;
2090}
2091
0fcb78c2 2092/**
1da177e4 2093 * flush_workqueue - ensure that any scheduled work has run to completion.
0fcb78c2 2094 * @wq: workqueue to flush
2095 *
2096 * Forces execution of the workqueue and blocks until its completion.
2097 * This is typically used in driver shutdown handlers.
2098 *
2099 * We sleep until all works which were queued on entry have been handled,
2100 * but we are not livelocked by new incoming ones.
1da177e4 2101 */
7ad5b3a5 2102void flush_workqueue(struct workqueue_struct *wq)
1da177e4 2103{
2104 struct wq_flusher this_flusher = {
2105 .list = LIST_HEAD_INIT(this_flusher.list),
2106 .flush_color = -1,
2107 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2108 };
2109 int next_color;
1da177e4 2110
2111 lock_map_acquire(&wq->lockdep_map);
2112 lock_map_release(&wq->lockdep_map);
2113
2114 mutex_lock(&wq->flush_mutex);
2115
2116 /*
2117 * Start-to-wait phase
2118 */
2119 next_color = work_next_color(wq->work_color);
2120
2121 if (next_color != wq->flush_color) {
2122 /*
2123 * Color space is not full. The current work_color
2124 * becomes our flush_color and work_color is advanced
2125 * by one.
2126 */
2127 BUG_ON(!list_empty(&wq->flusher_overflow));
2128 this_flusher.flush_color = wq->work_color;
2129 wq->work_color = next_color;
2130
2131 if (!wq->first_flusher) {
2132 /* no flush in progress, become the first flusher */
2133 BUG_ON(wq->flush_color != this_flusher.flush_color);
2134
2135 wq->first_flusher = &this_flusher;
2136
2137 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2138 wq->work_color)) {
2139 /* nothing to flush, done */
2140 wq->flush_color = next_color;
2141 wq->first_flusher = NULL;
2142 goto out_unlock;
2143 }
2144 } else {
2145 /* wait in queue */
2146 BUG_ON(wq->flush_color == this_flusher.flush_color);
2147 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2148 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2149 }
2150 } else {
2151 /*
2152 * Oops, color space is full, wait on overflow queue.
2153 * The next flush completion will assign us
2154 * flush_color and transfer to flusher_queue.
2155 */
2156 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2157 }
2158
2159 mutex_unlock(&wq->flush_mutex);
2160
2161 wait_for_completion(&this_flusher.done);
2162
2163 /*
2164 * Wake-up-and-cascade phase
2165 *
2166 * First flushers are responsible for cascading flushes and
2167 * handling overflow. Non-first flushers can simply return.
2168 */
2169 if (wq->first_flusher != &this_flusher)
2170 return;
2171
2172 mutex_lock(&wq->flush_mutex);
2173
2174 /* we might have raced, check again with mutex held */
2175 if (wq->first_flusher != &this_flusher)
2176 goto out_unlock;
2177
2178 wq->first_flusher = NULL;
2179
2180 BUG_ON(!list_empty(&this_flusher.list));
2181 BUG_ON(wq->flush_color != this_flusher.flush_color);
2182
2183 while (true) {
2184 struct wq_flusher *next, *tmp;
2185
2186 /* complete all the flushers sharing the current flush color */
2187 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2188 if (next->flush_color != wq->flush_color)
2189 break;
2190 list_del_init(&next->list);
2191 complete(&next->done);
2192 }
2193
2194 BUG_ON(!list_empty(&wq->flusher_overflow) &&
2195 wq->flush_color != work_next_color(wq->work_color));
2196
2197 /* this flush_color is finished, advance by one */
2198 wq->flush_color = work_next_color(wq->flush_color);
2199
2200 /* one color has been freed, handle overflow queue */
2201 if (!list_empty(&wq->flusher_overflow)) {
2202 /*
2203 * Assign the same color to all overflowed
2204 * flushers, advance work_color and append to
2205 * flusher_queue. This is the start-to-wait
2206 * phase for these overflowed flushers.
2207 */
2208 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2209 tmp->flush_color = wq->work_color;
2210
2211 wq->work_color = work_next_color(wq->work_color);
2212
2213 list_splice_tail_init(&wq->flusher_overflow,
2214 &wq->flusher_queue);
2215 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2216 }
2217
2218 if (list_empty(&wq->flusher_queue)) {
2219 BUG_ON(wq->flush_color != wq->work_color);
2220 break;
2221 }
2222
2223 /*
2224 * Need to flush more colors. Make the next flusher
2225 * the new first flusher and arm cwqs.
2226 */
2227 BUG_ON(wq->flush_color == wq->work_color);
2228 BUG_ON(wq->flush_color != next->flush_color);
2229
2230 list_del_init(&next->list);
2231 wq->first_flusher = next;
2232
2233 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2234 break;
2235
2236 /*
2237 * Meh... this color is already done, clear first
2238 * flusher and repeat cascading.
2239 */
2240 wq->first_flusher = NULL;
2241 }
2242
2243out_unlock:
2244 mutex_unlock(&wq->flush_mutex);
1da177e4 2245}
ae90dd5d 2246EXPORT_SYMBOL_GPL(flush_workqueue);
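/*
 * Illustrative usage sketch (not part of the original file; "my_wq" and
 * "my_dev" are hypothetical): a driver typically flushes its private
 * workqueue before tearing down the state its work items touch:
 *
 *	queue_work(my_wq, &my_dev->reset_work);
 *	...
 *	flush_workqueue(my_wq);		everything queued so far has run
 *	release my_dev's state, now untouched by any work item
 */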
1da177e4 2247
2248/**
2249 * flush_work - block until a work_struct's callback has terminated
2250 * @work: the work which is to be flushed
2251 *
2252 * Returns false if @work has already terminated.
2253 *
2254 * It is expected that, prior to calling flush_work(), the caller has
2255 * arranged for the work to not be requeued, otherwise it doesn't make
2256 * sense to use this function.
2257 */
2258int flush_work(struct work_struct *work)
2259{
affee4b2 2260 struct worker *worker = NULL;
8b03ae3c 2261 struct global_cwq *gcwq;
7a22ad75 2262 struct cpu_workqueue_struct *cwq;
2263 struct wq_barrier barr;
2264
2265 might_sleep();
2266 gcwq = get_work_gcwq(work);
2267 if (!gcwq)
db700897 2268 return 0;
a67da70d 2269
8b03ae3c 2270 spin_lock_irq(&gcwq->lock);
2271 if (!list_empty(&work->entry)) {
2272 /*
2273 * See the comment near try_to_grab_pending()->smp_rmb().
2274 * If it was re-queued to a different gcwq under us, we
2275 * are not going to wait.
2276 */
2277 smp_rmb();
2278 cwq = get_work_cwq(work);
2279 if (unlikely(!cwq || gcwq != cwq->gcwq))
4690c4ab 2280 goto already_gone;
db700897 2281 } else {
7a22ad75 2282 worker = find_worker_executing_work(gcwq, work);
affee4b2 2283 if (!worker)
4690c4ab 2284 goto already_gone;
7a22ad75 2285 cwq = worker->current_cwq;
db700897 2286 }
db700897 2287
affee4b2 2288 insert_wq_barrier(cwq, &barr, work, worker);
8b03ae3c 2289 spin_unlock_irq(&gcwq->lock);
2290
2291 lock_map_acquire(&cwq->wq->lockdep_map);
2292 lock_map_release(&cwq->wq->lockdep_map);
2293
db700897 2294 wait_for_completion(&barr.done);
dc186ad7 2295 destroy_work_on_stack(&barr.work);
db700897 2296 return 1;
4690c4ab 2297already_gone:
8b03ae3c 2298 spin_unlock_irq(&gcwq->lock);
4690c4ab 2299 return 0;
2300}
2301EXPORT_SYMBOL_GPL(flush_work);
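/*
 * Illustrative sketch (hypothetical "my_work"; assumes the caller has
 * already stopped requeueing it): wait for one specific work item rather
 * than flushing the whole workqueue:
 *
 *	if (!flush_work(&my_dev->my_work))
 *		pr_debug("my_work was already idle\n");
 */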
2302
6e84d644 2303/*
1f1f642e 2304 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2305 * so this work can't be re-armed in any way.
2306 */
2307static int try_to_grab_pending(struct work_struct *work)
2308{
8b03ae3c 2309 struct global_cwq *gcwq;
1f1f642e 2310 int ret = -1;
6e84d644 2311
22df02bb 2312 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1f1f642e 2313 return 0;
2314
2315 /*
2316 * The queueing is in progress, or it is already queued. Try to
2317 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2318 */
2319 gcwq = get_work_gcwq(work);
2320 if (!gcwq)
2321 return ret;
2322
8b03ae3c 2323 spin_lock_irq(&gcwq->lock);
2324 if (!list_empty(&work->entry)) {
2325 /*
7a22ad75 2326 * This work is queued, but perhaps we locked the wrong gcwq.
2327 * In that case we must see the new value after rmb(), see
2328 * insert_work()->wmb().
2329 */
2330 smp_rmb();
7a22ad75 2331 if (gcwq == get_work_gcwq(work)) {
dc186ad7 2332 debug_work_deactivate(work);
6e84d644 2333 list_del_init(&work->entry);
2334 cwq_dec_nr_in_flight(get_work_cwq(work),
2335 get_work_color(work));
2336 ret = 1;
2337 }
2338 }
8b03ae3c 2339 spin_unlock_irq(&gcwq->lock);
2340
2341 return ret;
2342}
2343
7a22ad75 2344static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2345{
2346 struct wq_barrier barr;
affee4b2 2347 struct worker *worker;
b89deed3 2348
8b03ae3c 2349 spin_lock_irq(&gcwq->lock);
affee4b2 2350
2351 worker = find_worker_executing_work(gcwq, work);
2352 if (unlikely(worker))
2353 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
affee4b2 2354
8b03ae3c 2355 spin_unlock_irq(&gcwq->lock);
b89deed3 2356
affee4b2 2357 if (unlikely(worker)) {
b89deed3 2358 wait_for_completion(&barr.done);
2359 destroy_work_on_stack(&barr.work);
2360 }
2361}
2362
6e84d644 2363static void wait_on_work(struct work_struct *work)
b89deed3 2364{
b1f4ec17 2365 int cpu;
b89deed3 2366
2367 might_sleep();
2368
2369 lock_map_acquire(&work->lockdep_map);
2370 lock_map_release(&work->lockdep_map);
4e6045f1 2371
f3421797 2372 for_each_gcwq_cpu(cpu)
7a22ad75 2373 wait_on_cpu_work(get_gcwq(cpu), work);
2374}
2375
2376static int __cancel_work_timer(struct work_struct *work,
2377 struct timer_list* timer)
2378{
2379 int ret;
2380
2381 do {
2382 ret = (timer && likely(del_timer(timer)));
2383 if (!ret)
2384 ret = try_to_grab_pending(work);
2385 wait_on_work(work);
2386 } while (unlikely(ret < 0));
2387
7a22ad75 2388 clear_work_data(work);
2389 return ret;
2390}
2391
2392/**
2393 * cancel_work_sync - block until a work_struct's callback has terminated
2394 * @work: the work which is to be flushed
2395 *
2396 * Returns true if @work was pending.
2397 *
2398 * cancel_work_sync() will cancel the work if it is queued. If the work's
2399 * callback appears to be running, cancel_work_sync() will block until it
2400 * has completed.
2401 *
2402 * It is possible to use this function if the work re-queues itself. It can
 2403 * cancel the work even if it migrates to another workqueue; however, in that
2404 * case it only guarantees that work->func() has completed on the last queued
2405 * workqueue.
2406 *
2407 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
2408 * pending, otherwise it goes into a busy-wait loop until the timer expires.
2409 *
 2410 * The caller must ensure that the workqueue_struct on which this work was last
2411 * queued can't be destroyed before this function returns.
2412 */
1f1f642e 2413int cancel_work_sync(struct work_struct *work)
6e84d644 2414{
1f1f642e 2415 return __cancel_work_timer(work, NULL);
b89deed3 2416}
28e53bdd 2417EXPORT_SYMBOL_GPL(cancel_work_sync);
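/*
 * Illustrative teardown sketch (hypothetical "my_work"/"my_dev"): after
 * this returns the callback is neither pending nor running, so the
 * backing object may safely be freed:
 *
 *	cancel_work_sync(&my_dev->my_work);
 *	kfree(my_dev);
 */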
b89deed3 2418
6e84d644 2419/**
f5a421a4 2420 * cancel_delayed_work_sync - reliably kill off a delayed work.
2421 * @dwork: the delayed work struct
2422 *
2423 * Returns true if @dwork was pending.
2424 *
2425 * It is possible to use this function if @dwork rearms itself via queue_work()
2426 * or queue_delayed_work(). See also the comment for cancel_work_sync().
2427 */
1f1f642e 2428int cancel_delayed_work_sync(struct delayed_work *dwork)
6e84d644 2429{
1f1f642e 2430 return __cancel_work_timer(&dwork->work, &dwork->timer);
6e84d644 2431}
f5a421a4 2432EXPORT_SYMBOL(cancel_delayed_work_sync);
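/*
 * Illustrative sketch (hypothetical self-rearming "poll_dwork"): stop a
 * polling routine for good, whether its timer is still pending or the
 * work is already queued or running:
 *
 *	cancel_delayed_work_sync(&my_dev->poll_dwork);
 */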
1da177e4 2433
2434/**
2435 * schedule_work - put work task in global workqueue
2436 * @work: job to be done
2437 *
2438 * Returns zero if @work was already on the kernel-global workqueue and
2439 * non-zero otherwise.
2440 *
2441 * This puts a job in the kernel-global workqueue if it was not already
2442 * queued and leaves it in the same position on the kernel-global
2443 * workqueue otherwise.
0fcb78c2 2444 */
7ad5b3a5 2445int schedule_work(struct work_struct *work)
1da177e4 2446{
d320c038 2447 return queue_work(system_wq, work);
1da177e4 2448}
ae90dd5d 2449EXPORT_SYMBOL(schedule_work);
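/*
 * Illustrative sketch (hypothetical names): defer non-atomic processing
 * from an interrupt handler to process context via the global workqueue:
 *
 *	static void my_work_fn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	from the interrupt handler:
 *	schedule_work(&my_work);
 */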
1da177e4 2450
2451/*
2452 * schedule_work_on - put work task on a specific cpu
2453 * @cpu: cpu to put the work task on
2454 * @work: job to be done
2455 *
 2456 * This puts a job on a specific cpu.
2457 */
2458int schedule_work_on(int cpu, struct work_struct *work)
2459{
d320c038 2460 return queue_work_on(cpu, system_wq, work);
2461}
2462EXPORT_SYMBOL(schedule_work_on);
2463
2464/**
2465 * schedule_delayed_work - put work task in global workqueue after delay
2466 * @dwork: job to be done
2467 * @delay: number of jiffies to wait or 0 for immediate execution
2468 *
2469 * After waiting for a given time this puts a job in the kernel-global
2470 * workqueue.
2471 */
7ad5b3a5 2472int schedule_delayed_work(struct delayed_work *dwork,
82f67cd9 2473 unsigned long delay)
1da177e4 2474{
d320c038 2475 return queue_delayed_work(system_wq, dwork, delay);
1da177e4 2476}
ae90dd5d 2477EXPORT_SYMBOL(schedule_delayed_work);
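/*
 * Illustrative sketch (hypothetical "poll_fn"/"poll_dwork"): run a
 * routine on the kernel-global workqueue roughly one second from now:
 *
 *	static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
 *
 *	schedule_delayed_work(&poll_dwork, HZ);
 */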
1da177e4 2478
2479/**
2480 * flush_delayed_work - block until a dwork_struct's callback has terminated
2481 * @dwork: the delayed work which is to be flushed
2482 *
2483 * Any timeout is cancelled, and any pending work is run immediately.
2484 */
2485void flush_delayed_work(struct delayed_work *dwork)
2486{
2487 if (del_timer_sync(&dwork->timer)) {
7a22ad75 2488 __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
4690c4ab 2489 &dwork->work);
2490 put_cpu();
2491 }
2492 flush_work(&dwork->work);
2493}
2494EXPORT_SYMBOL(flush_delayed_work);
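/*
 * Illustrative sketch (hypothetical "poll_dwork"): force a delayed work
 * whose timer may not have fired yet to run now and wait for it, e.g.
 * before reading the results it produces:
 *
 *	flush_delayed_work(&my_dev->poll_dwork);
 */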
2495
2496/**
2497 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2498 * @cpu: cpu to use
52bad64d 2499 * @dwork: job to be done
2500 * @delay: number of jiffies to wait
2501 *
2502 * After waiting for a given time this puts a job in the kernel-global
2503 * workqueue on the specified CPU.
2504 */
1da177e4 2505int schedule_delayed_work_on(int cpu,
52bad64d 2506 struct delayed_work *dwork, unsigned long delay)
1da177e4 2507{
d320c038 2508 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
1da177e4 2509}
ae90dd5d 2510EXPORT_SYMBOL(schedule_delayed_work_on);
1da177e4 2511
2512/**
2513 * schedule_on_each_cpu - call a function on each online CPU from keventd
2514 * @func: the function to call
2515 *
2516 * Returns zero on success.
2517 * Returns -ve errno on failure.
2518 *
2519 * schedule_on_each_cpu() is very slow.
2520 */
65f27f38 2521int schedule_on_each_cpu(work_func_t func)
2522{
2523 int cpu;
b6136773 2524 struct work_struct *works;
15316ba8 2525
2526 works = alloc_percpu(struct work_struct);
2527 if (!works)
15316ba8 2528 return -ENOMEM;
b6136773 2529
2530 get_online_cpus();
2531
15316ba8 2532 for_each_online_cpu(cpu) {
2533 struct work_struct *work = per_cpu_ptr(works, cpu);
2534
2535 INIT_WORK(work, func);
b71ab8c2 2536 schedule_work_on(cpu, work);
65a64464 2537 }
2538
2539 for_each_online_cpu(cpu)
2540 flush_work(per_cpu_ptr(works, cpu));
2541
95402b38 2542 put_online_cpus();
b6136773 2543 free_percpu(works);
2544 return 0;
2545}
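/*
 * Illustrative sketch (hypothetical "drain_local_caches"): run a helper
 * once on every online CPU and wait for all invocations to finish:
 *
 *	static void drain_local_caches(struct work_struct *unused) { ... }
 *
 *	if (schedule_on_each_cpu(drain_local_caches))
 *		printk(KERN_WARNING "per-cpu drain failed\n");
 */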
2546
2547/**
2548 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2549 *
2550 * Forces execution of the kernel-global workqueue and blocks until its
2551 * completion.
2552 *
2553 * Think twice before calling this function! It's very easy to get into
2554 * trouble if you don't take great care. Either of the following situations
2555 * will lead to deadlock:
2556 *
2557 * One of the work items currently on the workqueue needs to acquire
2558 * a lock held by your code or its caller.
2559 *
2560 * Your code is running in the context of a work routine.
2561 *
2562 * They will be detected by lockdep when they occur, but the first might not
2563 * occur very often. It depends on what work items are on the workqueue and
2564 * what locks they need, which you have no control over.
2565 *
2566 * In most situations flushing the entire workqueue is overkill; you merely
2567 * need to know that a particular work item isn't queued and isn't running.
2568 * In such cases you should use cancel_delayed_work_sync() or
2569 * cancel_work_sync() instead.
2570 */
2571void flush_scheduled_work(void)
2572{
d320c038 2573 flush_workqueue(system_wq);
1da177e4 2574}
ae90dd5d 2575EXPORT_SYMBOL(flush_scheduled_work);
1da177e4 2576
2577/**
2578 * execute_in_process_context - reliably execute the routine with user context
2579 * @fn: the function to execute
2580 * @ew: guaranteed storage for the execute work structure (must
2581 * be available when the work executes)
2582 *
2583 * Executes the function immediately if process context is available,
2584 * otherwise schedules the function for delayed execution.
2585 *
2586 * Returns: 0 - function was executed
2587 * 1 - function was scheduled for execution
2588 */
65f27f38 2589int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2590{
2591 if (!in_interrupt()) {
65f27f38 2592 fn(&ew->work);
2593 return 0;
2594 }
2595
65f27f38 2596 INIT_WORK(&ew->work, fn);
2597 schedule_work(&ew->work);
2598
2599 return 1;
2600}
2601EXPORT_SYMBOL_GPL(execute_in_process_context);
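/*
 * Illustrative sketch (hypothetical cleanup path): run a routine right
 * away when already in process context, or have keventd run it later
 * when called from atomic context; @ew must stay valid until the work
 * has executed:
 *
 *	execute_in_process_context(my_cleanup_fn, &my_dev->cleanup_ew);
 */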
2602
2603int keventd_up(void)
2604{
d320c038 2605 return system_wq != NULL;
2606}
2607
bdbc5dd7 2608static int alloc_cwqs(struct workqueue_struct *wq)
2609{
2610 /*
2611 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
2612 * Make sure that the alignment isn't lower than that of
2613 * unsigned long long.
2614 */
2615 const size_t size = sizeof(struct cpu_workqueue_struct);
2616 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2617 __alignof__(unsigned long long));
2618#ifdef CONFIG_SMP
2619 bool percpu = !(wq->flags & WQ_UNBOUND);
2620#else
2621 bool percpu = false;
2622#endif
0f900049 2623
931ac77e 2624 if (percpu)
f3421797 2625 wq->cpu_wq.pcpu = __alloc_percpu(size, align);
931ac77e 2626 else {
2627 void *ptr;
2628
2629 /*
2630 * Allocate enough room to align cwq and put an extra
2631 * pointer at the end pointing back to the originally
2632 * allocated pointer which will be used for free.
2633 */
2634 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2635 if (ptr) {
2636 wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2637 *(void **)(wq->cpu_wq.single + 1) = ptr;
2638 }
bdbc5dd7 2639 }
f3421797 2640
0f900049 2641 /* just in case, make sure it's actually aligned */
2642 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2643 return wq->cpu_wq.v ? 0 : -ENOMEM;
2644}
2645
bdbc5dd7 2646static void free_cwqs(struct workqueue_struct *wq)
0f900049 2647{
2648#ifdef CONFIG_SMP
2649 bool percpu = !(wq->flags & WQ_UNBOUND);
2650#else
2651 bool percpu = false;
2652#endif
2653
2654 if (percpu)
2655 free_percpu(wq->cpu_wq.pcpu);
2656 else if (wq->cpu_wq.single) {
2657 /* the pointer to free is stored right after the cwq */
bdbc5dd7 2658 kfree(*(void **)(wq->cpu_wq.single + 1));
f3421797 2659 }
2660}
2661
2662static int wq_clamp_max_active(int max_active, unsigned int flags,
2663 const char *name)
b71ab8c2 2664{
2665 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2666
2667 if (max_active < 1 || max_active > lim)
2668 printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2669 "is out of range, clamping between %d and %d\n",
f3421797 2670 max_active, name, 1, lim);
b71ab8c2 2671
f3421797 2672 return clamp_val(max_active, 1, lim);
2673}
2674
2675struct workqueue_struct *__alloc_workqueue_key(const char *name,
2676 unsigned int flags,
2677 int max_active,
2678 struct lock_class_key *key,
2679 const char *lock_name)
1da177e4 2680{
1da177e4 2681 struct workqueue_struct *wq;
c34056a3 2682 unsigned int cpu;
1da177e4 2683
2684 /*
2685 * Unbound workqueues aren't concurrency managed and should be
2686 * dispatched to workers immediately.
2687 */
2688 if (flags & WQ_UNBOUND)
2689 flags |= WQ_HIGHPRI;
2690
d320c038 2691 max_active = max_active ?: WQ_DFL_ACTIVE;
f3421797 2692 max_active = wq_clamp_max_active(max_active, flags, name);
1e19ffc6 2693
2694 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
2695 if (!wq)
4690c4ab 2696 goto err;
3af24433 2697
97e37d7b 2698 wq->flags = flags;
a0a1a5fd 2699 wq->saved_max_active = max_active;
2700 mutex_init(&wq->flush_mutex);
2701 atomic_set(&wq->nr_cwqs_to_flush, 0);
2702 INIT_LIST_HEAD(&wq->flusher_queue);
2703 INIT_LIST_HEAD(&wq->flusher_overflow);
502ca9d8 2704
3af24433 2705 wq->name = name;
eb13ba87 2706 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
cce1a165 2707 INIT_LIST_HEAD(&wq->list);
3af24433 2708
2709 if (alloc_cwqs(wq) < 0)
2710 goto err;
2711
f3421797 2712 for_each_cwq_cpu(cpu, wq) {
1537663f 2713 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
8b03ae3c 2714 struct global_cwq *gcwq = get_gcwq(cpu);
1537663f 2715
0f900049 2716 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
8b03ae3c 2717 cwq->gcwq = gcwq;
c34056a3 2718 cwq->wq = wq;
73f53c4a 2719 cwq->flush_color = -1;
1e19ffc6 2720 cwq->max_active = max_active;
1e19ffc6 2721 INIT_LIST_HEAD(&cwq->delayed_works);
e22bee78 2722 }
1537663f 2723
2724 if (flags & WQ_RESCUER) {
2725 struct worker *rescuer;
2726
2727 if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL))
2728 goto err;
2729
2730 wq->rescuer = rescuer = alloc_worker();
2731 if (!rescuer)
2732 goto err;
2733
2734 rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2735 if (IS_ERR(rescuer->task))
2736 goto err;
2737
2738 wq->rescuer = rescuer;
2739 rescuer->task->flags |= PF_THREAD_BOUND;
2740 wake_up_process(rescuer->task);
2741 }
2742
2743 /*
2744 * workqueue_lock protects global freeze state and workqueues
2745 * list. Grab it, set max_active accordingly and add the new
2746 * workqueue to workqueues list.
2747 */
1537663f 2748 spin_lock(&workqueue_lock);
2749
2750 if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
f3421797 2751 for_each_cwq_cpu(cpu, wq)
2752 get_cwq(cpu, wq)->max_active = 0;
2753
1537663f 2754 list_add(&wq->list, &workqueues);
a0a1a5fd 2755
2756 spin_unlock(&workqueue_lock);
2757
3af24433 2758 return wq;
2759err:
2760 if (wq) {
bdbc5dd7 2761 free_cwqs(wq);
2762 free_cpumask_var(wq->mayday_mask);
2763 kfree(wq->rescuer);
2764 kfree(wq);
2765 }
2766 return NULL;
3af24433 2767}
d320c038 2768EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
1da177e4 2769
2770/**
2771 * destroy_workqueue - safely terminate a workqueue
2772 * @wq: target workqueue
2773 *
2774 * Safely destroy a workqueue. All work currently pending will be done first.
2775 */
2776void destroy_workqueue(struct workqueue_struct *wq)
2777{
c8e55f36 2778 unsigned int cpu;
3af24433 2779
2780 flush_workqueue(wq);
2781
2782 /*
2783 * wq list is used to freeze wq, remove from list after
2784 * flushing is complete in case freeze races us.
2785 */
95402b38 2786 spin_lock(&workqueue_lock);
b1f4ec17 2787 list_del(&wq->list);
95402b38 2788 spin_unlock(&workqueue_lock);
3af24433 2789
e22bee78 2790 /* sanity check */
f3421797 2791 for_each_cwq_cpu(cpu, wq) {
2792 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2793 int i;
2794
2795 for (i = 0; i < WORK_NR_COLORS; i++)
2796 BUG_ON(cwq->nr_in_flight[i]);
2797 BUG_ON(cwq->nr_active);
2798 BUG_ON(!list_empty(&cwq->delayed_works));
73f53c4a 2799 }
9b41ea72 2800
2801 if (wq->flags & WQ_RESCUER) {
2802 kthread_stop(wq->rescuer->task);
2803 free_cpumask_var(wq->mayday_mask);
2804 }
2805
bdbc5dd7 2806 free_cwqs(wq);
2807 kfree(wq);
2808}
2809EXPORT_SYMBOL_GPL(destroy_workqueue);
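/*
 * Illustrative lifecycle sketch (hypothetical names; WQ_RESCUER chosen
 * only as an example flag): create a dedicated workqueue, feed it work
 * and tear it down on module exit:
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_RESCUER, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_dev->io_work);
 *	...
 *	destroy_workqueue(my_wq);	flushes remaining work first
 */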
2810
2811/**
2812 * workqueue_set_max_active - adjust max_active of a workqueue
2813 * @wq: target workqueue
2814 * @max_active: new max_active value.
2815 *
2816 * Set max_active of @wq to @max_active.
2817 *
2818 * CONTEXT:
2819 * Don't call from IRQ context.
2820 */
2821void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2822{
2823 unsigned int cpu;
2824
f3421797 2825 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2826
2827 spin_lock(&workqueue_lock);
2828
2829 wq->saved_max_active = max_active;
2830
f3421797 2831 for_each_cwq_cpu(cpu, wq) {
2832 struct global_cwq *gcwq = get_gcwq(cpu);
2833
2834 spin_lock_irq(&gcwq->lock);
2835
2836 if (!(wq->flags & WQ_FREEZEABLE) ||
2837 !(gcwq->flags & GCWQ_FREEZING))
2838 get_cwq(gcwq->cpu, wq)->max_active = max_active;
2839
2840 spin_unlock_irq(&gcwq->lock);
2841 }
2842
2843 spin_unlock(&workqueue_lock);
2844}
2845EXPORT_SYMBOL_GPL(workqueue_set_max_active);
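/*
 * Illustrative sketch (hypothetical "my_io_wq"): limit a workqueue to at
 * most four in-flight work items per cpu workqueue at runtime:
 *
 *	workqueue_set_max_active(my_io_wq, 4);
 */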
2846
2847/**
2848 * workqueue_congested - test whether a workqueue is congested
2849 * @cpu: CPU in question
2850 * @wq: target workqueue
2851 *
2852 * Test whether @wq's cpu workqueue for @cpu is congested. There is
2853 * no synchronization around this function and the test result is
2854 * unreliable and only useful as advisory hints or for debugging.
2855 *
2856 * RETURNS:
2857 * %true if congested, %false otherwise.
2858 */
2859bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
2860{
2861 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2862
2863 return !list_empty(&cwq->delayed_works);
2864}
2865EXPORT_SYMBOL_GPL(workqueue_congested);
2866
2867/**
2868 * work_cpu - return the last known associated cpu for @work
2869 * @work: the work of interest
2870 *
2871 * RETURNS:
bdbc5dd7 2872 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
2873 */
2874unsigned int work_cpu(struct work_struct *work)
2875{
2876 struct global_cwq *gcwq = get_work_gcwq(work);
2877
bdbc5dd7 2878 return gcwq ? gcwq->cpu : WORK_CPU_NONE;
2879}
2880EXPORT_SYMBOL_GPL(work_cpu);
2881
2882/**
2883 * work_busy - test whether a work is currently pending or running
2884 * @work: the work to be tested
2885 *
2886 * Test whether @work is currently pending or running. There is no
2887 * synchronization around this function and the test result is
2888 * unreliable and only useful as advisory hints or for debugging.
2889 * Especially for reentrant wqs, the pending state might hide the
2890 * running state.
2891 *
2892 * RETURNS:
2893 * OR'd bitmask of WORK_BUSY_* bits.
2894 */
2895unsigned int work_busy(struct work_struct *work)
2896{
2897 struct global_cwq *gcwq = get_work_gcwq(work);
2898 unsigned long flags;
2899 unsigned int ret = 0;
2900
2901 if (!gcwq)
2902 return false;
2903
2904 spin_lock_irqsave(&gcwq->lock, flags);
2905
2906 if (work_pending(work))
2907 ret |= WORK_BUSY_PENDING;
2908 if (find_worker_executing_work(gcwq, work))
2909 ret |= WORK_BUSY_RUNNING;
2910
2911 spin_unlock_irqrestore(&gcwq->lock, flags);
2912
2913 return ret;
2914}
2915EXPORT_SYMBOL_GPL(work_busy);
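/*
 * Illustrative sketch (hypothetical "my_work" and seq_file "m"): an
 * advisory check, e.g. for a debugfs status file; the result may already
 * be stale by the time it is used:
 *
 *	unsigned int busy = work_busy(&my_dev->my_work);
 *
 *	if (busy & WORK_BUSY_RUNNING)
 *		seq_puts(m, "work is running\n");
 */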
2916
2917/*
2918 * CPU hotplug.
2919 *
2920 * There are two challenges in supporting CPU hotplug. Firstly, there
2921 * are a lot of assumptions on strong associations among work, cwq and
2922 * gcwq which make migrating pending and scheduled works very
2923 * difficult to implement without impacting hot paths. Secondly,
 2924 * gcwqs serve a mix of short, long and very long running works, making
 2925 * blocked draining impractical.
2926 *
2927 * This is solved by allowing a gcwq to be detached from CPU, running
2928 * it with unbound (rogue) workers and allowing it to be reattached
2929 * later if the cpu comes back online. A separate thread is created
2930 * to govern a gcwq in such state and is called the trustee of the
2931 * gcwq.
2932 *
2933 * Trustee states and their descriptions.
2934 *
2935 * START Command state used on startup. On CPU_DOWN_PREPARE, a
2936 * new trustee is started with this state.
2937 *
2938 * IN_CHARGE Once started, trustee will enter this state after
2939 * assuming the manager role and making all existing
2940 * workers rogue. DOWN_PREPARE waits for trustee to
2941 * enter this state. After reaching IN_CHARGE, trustee
2942 * tries to execute the pending worklist until it's empty
2943 * and the state is set to BUTCHER, or the state is set
2944 * to RELEASE.
2945 *
2946 * BUTCHER Command state which is set by the cpu callback after
 2947 * the cpu has gone down. Once this state is set, the trustee
2948 * knows that there will be no new works on the worklist
2949 * and once the worklist is empty it can proceed to
2950 * killing idle workers.
2951 *
2952 * RELEASE Command state which is set by the cpu callback if the
2953 * cpu down has been canceled or it has come online
2954 * again. After recognizing this state, trustee stops
2955 * trying to drain or butcher and clears ROGUE, rebinds
2956 * all remaining workers back to the cpu and releases
2957 * manager role.
2958 *
2959 * DONE Trustee will enter this state after BUTCHER or RELEASE
2960 * is complete.
2961 *
2962 * trustee CPU draining
2963 * took over down complete
2964 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2965 * | | ^
2966 * | CPU is back online v return workers |
2967 * ----------------> RELEASE --------------
2968 */
2969
2970/**
2971 * trustee_wait_event_timeout - timed event wait for trustee
2972 * @cond: condition to wait for
2973 * @timeout: timeout in jiffies
2974 *
2975 * wait_event_timeout() for trustee to use. Handles locking and
2976 * checks for RELEASE request.
2977 *
2978 * CONTEXT:
2979 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2980 * multiple times. To be used by trustee.
2981 *
2982 * RETURNS:
2983 * Positive indicating left time if @cond is satisfied, 0 if timed
2984 * out, -1 if canceled.
2985 */
2986#define trustee_wait_event_timeout(cond, timeout) ({ \
2987 long __ret = (timeout); \
2988 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
2989 __ret) { \
2990 spin_unlock_irq(&gcwq->lock); \
2991 __wait_event_timeout(gcwq->trustee_wait, (cond) || \
2992 (gcwq->trustee_state == TRUSTEE_RELEASE), \
2993 __ret); \
2994 spin_lock_irq(&gcwq->lock); \
2995 } \
2996 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
2997})
2998
2999/**
3000 * trustee_wait_event - event wait for trustee
3001 * @cond: condition to wait for
3002 *
3003 * wait_event() for trustee to use. Automatically handles locking and
3004 * checks for CANCEL request.
3005 *
3006 * CONTEXT:
3007 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3008 * multiple times. To be used by trustee.
3009 *
3010 * RETURNS:
3011 * 0 if @cond is satisfied, -1 if canceled.
3012 */
3013#define trustee_wait_event(cond) ({ \
3014 long __ret1; \
3015 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3016 __ret1 < 0 ? -1 : 0; \
3017})
3018
3019static int __cpuinit trustee_thread(void *__gcwq)
3020{
3021 struct global_cwq *gcwq = __gcwq;
3022 struct worker *worker;
e22bee78 3023 struct work_struct *work;
db7bccf4 3024 struct hlist_node *pos;
e22bee78 3025 long rc;
3026 int i;
3027
3028 BUG_ON(gcwq->cpu != smp_processor_id());
3029
3030 spin_lock_irq(&gcwq->lock);
3031 /*
3032 * Claim the manager position and make all workers rogue.
3033 * Trustee must be bound to the target cpu and can't be
3034 * cancelled.
3035 */
3036 BUG_ON(gcwq->cpu != smp_processor_id());
3037 rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3038 BUG_ON(rc < 0);
3039
3040 gcwq->flags |= GCWQ_MANAGING_WORKERS;
3041
3042 list_for_each_entry(worker, &gcwq->idle_list, entry)
cb444766 3043 worker->flags |= WORKER_ROGUE;
3044
3045 for_each_busy_worker(worker, i, pos, gcwq)
cb444766 3046 worker->flags |= WORKER_ROGUE;
db7bccf4 3047
3048 /*
3049 * Call schedule() so that we cross rq->lock and thus can
3050 * guarantee sched callbacks see the rogue flag. This is
3051 * necessary as scheduler callbacks may be invoked from other
3052 * cpus.
3053 */
3054 spin_unlock_irq(&gcwq->lock);
3055 schedule();
3056 spin_lock_irq(&gcwq->lock);
3057
3058 /*
3059 * Sched callbacks are disabled now. Zap nr_running. After
3060 * this, nr_running stays zero and need_more_worker() and
3061 * keep_working() are always true as long as the worklist is
3062 * not empty.
e22bee78 3063 */
cb444766 3064 atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3065
3066 spin_unlock_irq(&gcwq->lock);
3067 del_timer_sync(&gcwq->idle_timer);
3068 spin_lock_irq(&gcwq->lock);
3069
3070 /*
3071 * We're now in charge. Notify and proceed to drain. We need
3072 * to keep the gcwq running during the whole CPU down
3073 * procedure as other cpu hotunplug callbacks may need to
3074 * flush currently running tasks.
3075 */
3076 gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3077 wake_up_all(&gcwq->trustee_wait);
3078
3079 /*
3080 * The original cpu is in the process of dying and may go away
3081 * anytime now. When that happens, we and all workers would
 3082 * be migrated to other cpus. Try draining any remaining work. We
3083 * want to get it over with ASAP - spam rescuers, wake up as
3084 * many idlers as necessary and create new ones till the
3085 * worklist is empty. Note that if the gcwq is frozen, there
3086 * may be frozen works in freezeable cwqs. Don't declare
3087 * completion while frozen.
3088 */
3089 while (gcwq->nr_workers != gcwq->nr_idle ||
3090 gcwq->flags & GCWQ_FREEZING ||
3091 gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3092 int nr_works = 0;
3093
3094 list_for_each_entry(work, &gcwq->worklist, entry) {
3095 send_mayday(work);
3096 nr_works++;
3097 }
3098
3099 list_for_each_entry(worker, &gcwq->idle_list, entry) {
3100 if (!nr_works--)
3101 break;
3102 wake_up_process(worker->task);
3103 }
3104
3105 if (need_to_create_worker(gcwq)) {
3106 spin_unlock_irq(&gcwq->lock);
3107 worker = create_worker(gcwq, false);
3108 spin_lock_irq(&gcwq->lock);
3109 if (worker) {
cb444766 3110 worker->flags |= WORKER_ROGUE;
3111 start_worker(worker);
3112 }
3113 }
3114
3115 /* give a breather */
3116 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3117 break;
3118 }
3119
3120 /*
3121 * Either all works have been scheduled and cpu is down, or
3122 * cpu down has already been canceled. Wait for and butcher
3123 * all workers till we're canceled.
3124 */
3125 do {
3126 rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3127 while (!list_empty(&gcwq->idle_list))
3128 destroy_worker(list_first_entry(&gcwq->idle_list,
3129 struct worker, entry));
3130 } while (gcwq->nr_workers && rc >= 0);
3131
3132 /*
3133 * At this point, either draining has completed and no worker
3134 * is left, or cpu down has been canceled or the cpu is being
3135 * brought back up. There shouldn't be any idle one left.
3136 * Tell the remaining busy ones to rebind once it finishes the
3137 * currently scheduled works by scheduling the rebind_work.
3138 */
3139 WARN_ON(!list_empty(&gcwq->idle_list));
3140
3141 for_each_busy_worker(worker, i, pos, gcwq) {
3142 struct work_struct *rebind_work = &worker->rebind_work;
3143
3144 /*
3145 * Rebind_work may race with future cpu hotplug
3146 * operations. Use a separate flag to mark that
3147 * rebinding is scheduled.
3148 */
3149 worker->flags |= WORKER_REBIND;
3150 worker->flags &= ~WORKER_ROGUE;
3151
3152 /* queue rebind_work, wq doesn't matter, use the default one */
3153 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3154 work_data_bits(rebind_work)))
3155 continue;
3156
3157 debug_work_activate(rebind_work);
d320c038 3158 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3159 worker->scheduled.next,
3160 work_color_to_flags(WORK_NO_COLOR));
3161 }
3162
3163 /* relinquish manager role */
3164 gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3165
3166 /* notify completion */
3167 gcwq->trustee = NULL;
3168 gcwq->trustee_state = TRUSTEE_DONE;
3169 wake_up_all(&gcwq->trustee_wait);
3170 spin_unlock_irq(&gcwq->lock);
3171 return 0;
3172}
3173
3174/**
3175 * wait_trustee_state - wait for trustee to enter the specified state
3176 * @gcwq: gcwq the trustee of interest belongs to
3177 * @state: target state to wait for
3178 *
3179 * Wait for the trustee to reach @state. DONE is already matched.
3180 *
3181 * CONTEXT:
3182 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3183 * multiple times. To be used by cpu_callback.
3184 */
3185static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3186{
3187 if (!(gcwq->trustee_state == state ||
3188 gcwq->trustee_state == TRUSTEE_DONE)) {
3189 spin_unlock_irq(&gcwq->lock);
3190 __wait_event(gcwq->trustee_wait,
3191 gcwq->trustee_state == state ||
3192 gcwq->trustee_state == TRUSTEE_DONE);
3193 spin_lock_irq(&gcwq->lock);
3194 }
3195}
3196
3197static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3198 unsigned long action,
3199 void *hcpu)
3200{
3201 unsigned int cpu = (unsigned long)hcpu;
3202 struct global_cwq *gcwq = get_gcwq(cpu);
3203 struct task_struct *new_trustee = NULL;
e22bee78 3204 struct worker *uninitialized_var(new_worker);
db7bccf4 3205 unsigned long flags;
3af24433 3206
3207 action &= ~CPU_TASKS_FROZEN;
3208
3209 switch (action) {
3210 case CPU_DOWN_PREPARE:
3211 new_trustee = kthread_create(trustee_thread, gcwq,
3212 "workqueue_trustee/%d\n", cpu);
3213 if (IS_ERR(new_trustee))
3214 return notifier_from_errno(PTR_ERR(new_trustee));
3215 kthread_bind(new_trustee, cpu);
3216 /* fall through */
3217 case CPU_UP_PREPARE:
3218 BUG_ON(gcwq->first_idle);
3219 new_worker = create_worker(gcwq, false);
3220 if (!new_worker) {
3221 if (new_trustee)
3222 kthread_stop(new_trustee);
3223 return NOTIFY_BAD;
3224 }
db7bccf4 3225 }
3af24433 3226
3227 /* some are called w/ irq disabled, don't disturb irq status */
3228 spin_lock_irqsave(&gcwq->lock, flags);
3af24433 3229
3230 switch (action) {
3231 case CPU_DOWN_PREPARE:
3232 /* initialize trustee and tell it to acquire the gcwq */
3233 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3234 gcwq->trustee = new_trustee;
3235 gcwq->trustee_state = TRUSTEE_START;
3236 wake_up_process(gcwq->trustee);
3237 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3238 /* fall through */
3239 case CPU_UP_PREPARE:
3240 BUG_ON(gcwq->first_idle);
3241 gcwq->first_idle = new_worker;
3242 break;
3243
3244 case CPU_DYING:
3245 /*
3246 * Before this, the trustee and all workers except for
3247 * the ones which are still executing works from
3248 * before the last CPU down must be on the cpu. After
3249 * this, they'll all be diasporas.
3250 */
3251 gcwq->flags |= GCWQ_DISASSOCIATED;
3252 break;
3253
3254 case CPU_POST_DEAD:
3255 gcwq->trustee_state = TRUSTEE_BUTCHER;
3256 /* fall through */
3257 case CPU_UP_CANCELED:
3258 destroy_worker(gcwq->first_idle);
3259 gcwq->first_idle = NULL;
3260 break;
3261
3262 case CPU_DOWN_FAILED:
3263 case CPU_ONLINE:
e22bee78 3264 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3265 if (gcwq->trustee_state != TRUSTEE_DONE) {
3266 gcwq->trustee_state = TRUSTEE_RELEASE;
3267 wake_up_process(gcwq->trustee);
3268 wait_trustee_state(gcwq, TRUSTEE_DONE);
3af24433 3269 }
db7bccf4 3270
3271 /*
3272 * Trustee is done and there might be no worker left.
3273 * Put the first_idle in and request a real manager to
3274 * take a look.
3275 */
3276 spin_unlock_irq(&gcwq->lock);
3277 kthread_bind(gcwq->first_idle->task, cpu);
3278 spin_lock_irq(&gcwq->lock);
3279 gcwq->flags |= GCWQ_MANAGE_WORKERS;
3280 start_worker(gcwq->first_idle);
3281 gcwq->first_idle = NULL;
db7bccf4 3282 break;
3283 }
3284
3285 spin_unlock_irqrestore(&gcwq->lock, flags);
3286
1537663f 3287 return notifier_from_errno(0);
1da177e4 3288}
1da177e4 3289
2d3854a3 3290#ifdef CONFIG_SMP
8ccad40d 3291
2d3854a3 3292struct work_for_cpu {
6b44003e 3293 struct completion completion;
3294 long (*fn)(void *);
3295 void *arg;
3296 long ret;
3297};
3298
6b44003e 3299static int do_work_for_cpu(void *_wfc)
2d3854a3 3300{
6b44003e 3301 struct work_for_cpu *wfc = _wfc;
2d3854a3 3302 wfc->ret = wfc->fn(wfc->arg);
3303 complete(&wfc->completion);
3304 return 0;
3305}
3306
3307/**
3308 * work_on_cpu - run a function in user context on a particular cpu
3309 * @cpu: the cpu to run on
3310 * @fn: the function to run
3311 * @arg: the function arg
3312 *
3313 * This will return the value @fn returns.
3314 * It is up to the caller to ensure that the cpu doesn't go offline.
6b44003e 3315 * The caller must not hold any locks which would prevent @fn from completing.
3316 */
3317long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3318{
3319 struct task_struct *sub_thread;
3320 struct work_for_cpu wfc = {
3321 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3322 .fn = fn,
3323 .arg = arg,
3324 };
3325
3326 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3327 if (IS_ERR(sub_thread))
3328 return PTR_ERR(sub_thread);
3329 kthread_bind(sub_thread, cpu);
3330 wake_up_process(sub_thread);
3331 wait_for_completion(&wfc.completion);
3332 return wfc.ret;
3333}
3334EXPORT_SYMBOL_GPL(work_on_cpu);
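/*
 * Illustrative sketch (hypothetical probe helper): run a function on a
 * chosen cpu and collect its return value, e.g. so that node-local
 * allocations happen on the right node:
 *
 *	ret = work_on_cpu(cpu, my_probe_fn, my_dev);
 */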
3335#endif /* CONFIG_SMP */
3336
3337#ifdef CONFIG_FREEZER
3338
3339/**
3340 * freeze_workqueues_begin - begin freezing workqueues
3341 *
3342 * Start freezing workqueues. After this function returns, all
3343 * freezeable workqueues will queue new works to their frozen_works
7e11629d 3344 * list instead of gcwq->worklist.
3345 *
3346 * CONTEXT:
8b03ae3c 3347 * Grabs and releases workqueue_lock and gcwq->lock's.
3348 */
3349void freeze_workqueues_begin(void)
3350{
3351 unsigned int cpu;
3352
3353 spin_lock(&workqueue_lock);
3354
3355 BUG_ON(workqueue_freezing);
3356 workqueue_freezing = true;
3357
f3421797 3358 for_each_gcwq_cpu(cpu) {
8b03ae3c 3359 struct global_cwq *gcwq = get_gcwq(cpu);
bdbc5dd7 3360 struct workqueue_struct *wq;
3361
3362 spin_lock_irq(&gcwq->lock);
3363
3364 BUG_ON(gcwq->flags & GCWQ_FREEZING);
3365 gcwq->flags |= GCWQ_FREEZING;
3366
3367 list_for_each_entry(wq, &workqueues, list) {
3368 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3369
f3421797 3370 if (cwq && wq->flags & WQ_FREEZEABLE)
a0a1a5fd 3371 cwq->max_active = 0;
a0a1a5fd 3372 }
3373
3374 spin_unlock_irq(&gcwq->lock);
3375 }
3376
3377 spin_unlock(&workqueue_lock);
3378}
3379
3380/**
3381 * freeze_workqueues_busy - are freezeable workqueues still busy?
3382 *
3383 * Check whether freezing is complete. This function must be called
3384 * between freeze_workqueues_begin() and thaw_workqueues().
3385 *
3386 * CONTEXT:
3387 * Grabs and releases workqueue_lock.
3388 *
3389 * RETURNS:
3390 * %true if some freezeable workqueues are still busy. %false if
3391 * freezing is complete.
3392 */
3393bool freeze_workqueues_busy(void)
3394{
3395 unsigned int cpu;
3396 bool busy = false;
3397
3398 spin_lock(&workqueue_lock);
3399
3400 BUG_ON(!workqueue_freezing);
3401
f3421797 3402 for_each_gcwq_cpu(cpu) {
bdbc5dd7 3403 struct workqueue_struct *wq;
a0a1a5fd
TH
3404 /*
3405 * nr_active is monotonically decreasing. It's safe
3406 * to peek without lock.
3407 */
3408 list_for_each_entry(wq, &workqueues, list) {
3409 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3410
f3421797 3411 if (!cwq || !(wq->flags & WQ_FREEZEABLE))
a0a1a5fd
TH
3412 continue;
3413
3414 BUG_ON(cwq->nr_active < 0);
3415 if (cwq->nr_active) {
3416 busy = true;
3417 goto out_unlock;
3418 }
3419 }
3420 }
3421out_unlock:
3422 spin_unlock(&workqueue_lock);
3423 return busy;
3424}
3425
3426/**
3427 * thaw_workqueues - thaw workqueues
3428 *
3429 * Thaw workqueues. Normal queueing is restored and all collected
7e11629d 3430 * frozen works are transferred to their respective gcwq worklists.
a0a1a5fd
TH
3431 *
3432 * CONTEXT:
8b03ae3c 3433 * Grabs and releases workqueue_lock and gcwq->lock's.
a0a1a5fd
TH
3434 */
3435void thaw_workqueues(void)
3436{
a0a1a5fd
TH
3437 unsigned int cpu;
3438
3439 spin_lock(&workqueue_lock);
3440
3441 if (!workqueue_freezing)
3442 goto out_unlock;
3443
f3421797 3444 for_each_gcwq_cpu(cpu) {
8b03ae3c 3445 struct global_cwq *gcwq = get_gcwq(cpu);
bdbc5dd7 3446 struct workqueue_struct *wq;
8b03ae3c
TH
3447
3448 spin_lock_irq(&gcwq->lock);
3449
db7bccf4
TH
3450 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3451 gcwq->flags &= ~GCWQ_FREEZING;
3452
a0a1a5fd
TH
3453 list_for_each_entry(wq, &workqueues, list) {
3454 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3455
f3421797 3456 if (!cwq || !(wq->flags & WQ_FREEZEABLE))
a0a1a5fd
TH
3457 continue;
3458
a0a1a5fd
TH
3459 /* restore max_active and repopulate worklist */
3460 cwq->max_active = wq->saved_max_active;
3461
3462 while (!list_empty(&cwq->delayed_works) &&
3463 cwq->nr_active < cwq->max_active)
3464 cwq_activate_first_delayed(cwq);
a0a1a5fd 3465 }
8b03ae3c 3466
e22bee78
TH
3467 wake_up_worker(gcwq);
3468
8b03ae3c 3469 spin_unlock_irq(&gcwq->lock);
a0a1a5fd
TH
3470 }
3471
3472 workqueue_freezing = false;
3473out_unlock:
3474 spin_unlock(&workqueue_lock);
3475}
3476#endif /* CONFIG_FREEZER */
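
/*
 * Illustrative sketch, not part of workqueue.c: the calling sequence the
 * freezer interface above expects, roughly as the PM freezer would drive
 * it.  example_freeze_wqs(), the retry count and the msleep() interval are
 * made-up details; assumes CONFIG_FREEZER and <linux/delay.h> for msleep().
 */
static int example_freeze_wqs(void)
{
	int retries = 100;

	freeze_workqueues_begin();	/* new works now land on frozen lists */

	/* wait for works already on gcwq worklists to drain */
	while (freeze_workqueues_busy()) {
		if (!--retries) {
			thaw_workqueues();	/* give up, restore queueing */
			return -EBUSY;
		}
		msleep(10);
	}

	return 0;	/* thaw_workqueues() is called later to resume */
}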

void __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	/*
	 * The pointer part of work->data is either pointing to the
	 * cwq or contains the cpu number the work ran last on.  Make
	 * sure cpu number won't overflow into kernel pointer area so
	 * that they can be distinguished.
	 */
	BUILD_BUG_ON(WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);

	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

	/* initialize gcwqs */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		INIT_LIST_HEAD(&gcwq->worklist);
		gcwq->cpu = cpu;
		if (cpu == WORK_CPU_UNBOUND)
			gcwq->flags |= GCWQ_DISASSOCIATED;

		INIT_LIST_HEAD(&gcwq->idle_list);
		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		init_timer_deferrable(&gcwq->idle_timer);
		gcwq->idle_timer.function = idle_worker_timeout;
		gcwq->idle_timer.data = (unsigned long)gcwq;

		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
			    (unsigned long)gcwq);

		ida_init(&gcwq->worker_ida);

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	/* create the initial worker */
	for_each_online_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;

		worker = create_worker(gcwq, true);
		BUG_ON(!worker);
		spin_lock_irq(&gcwq->lock);
		start_worker(worker);
		spin_unlock_irq(&gcwq->lock);
	}

	system_wq = alloc_workqueue("events", 0, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
}
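
/*
 * Illustrative sketch, not part of workqueue.c: typical use of the system
 * workqueues created by init_workqueues() above.  example_work_fn() and
 * example_queue() are made-up names for illustration.
 */
static void example_work_fn(struct work_struct *work)
{
	/* executes in process context on one of the gcwq workers */
	pr_debug("example work ran\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_queue(void)
{
	/* short, non-blocking items go to system_wq ... */
	queue_work(system_wq, &example_work);
	/*
	 * ... long-running items belong on system_long_wq, works that must
	 * not be reentered on another cpu on system_nrt_wq, and works that
	 * shouldn't be tied to the queueing cpu on system_unbound_wq.
	 */
}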