/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: cwq->lock protected.  Access with cwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	spinlock_t		lock;

	struct list_head	worklist;
	wait_queue_head_t	more_work;
	struct work_struct	*current_work;
	unsigned int		cpu;

	struct workqueue_struct	*wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	struct task_struct	*thread;
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}
static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};
static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
					       struct workqueue_struct *wq)
{
	if (unlikely(wq->flags & WQ_SINGLE_THREAD))
		cpu = singlethread_cpu;
	return get_cwq(cpu, wq);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
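/*
 * Illustrative sketch (not part of the original file, not compiled): how the
 * color helpers above cooperate.  Assumes WORK_STRUCT_COLOR_BITS is 4, i.e.
 * colors 0..14 are usable and the remaining value is WORK_NO_COLOR.
 */
#if 0
static void color_helpers_example(struct cpu_workqueue_struct *cwq,
				  struct work_struct *work)
{
	/* queueing accounts the work under the queue's current color */
	cwq->nr_in_flight[cwq->work_color]++;
	insert_work(cwq, work, &cwq->worklist,
		    work_color_to_flags(cwq->work_color));

	/* completion recovers the color and balances the counter */
	cwq_dec_nr_in_flight(cwq, get_work_color(work));

	/* a flush advances the queue color; color 14 wraps back to 0 */
	cwq->work_color = work_next_color(cwq->work_color);
}
#endif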
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}
/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));
	cwq->nr_in_flight[cwq->work_color]++;
	insert_work(cwq, work, &cwq->worklist,
		    work_color_to_flags(cwq->work_color));
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
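/*
 * Usage sketch (illustrative only, not part of this file): queueing a work
 * item on a driver-private workqueue.  The my_* names are hypothetical.
 */
#if 0
static void my_work_fn(struct work_struct *work)
{
	/* runs later in process context on the CPU that queued it */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_kick(struct workqueue_struct *my_wq)
{
	/* non-zero return means it was newly queued, 0 means already pending */
	if (!queue_work(my_wq, &my_work))
		pr_debug("my_work was already pending\n");
}
#endif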
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
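/*
 * Usage sketch (illustrative only): a delayed work that re-arms itself from
 * its own handler to poll roughly once a second.  Names are hypothetical.
 */
#if 0
static struct workqueue_struct *my_wq;		/* created elsewhere */
static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
	/* do the periodic check, then queue the next round */
	queue_delayed_work(my_wq, &my_poll, HZ);
}
#endif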
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
/**
 * process_one_work - process single work
 * @cwq: cwq to process work for
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct cpu_workqueue_struct *cwq,
			     struct work_struct *work)
{
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	cwq->current_work = work;
	work_color = get_work_color(work);
	list_del_init(&work->entry);

	spin_unlock_irq(&cwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&cwq->lock);

	/* we're done with it, release */
	cwq->current_work = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		process_one_work(cwq, work);
	}
	spin_unlock_irq(&cwq->lock);
}
/**
 * worker_thread - the worker thread function
 * @__cwq: cwq to serve
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->flags & WQ_FREEZEABLE)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed,
					    get_cpu_mask(cwq->cpu))))
			set_cpus_allowed_ptr(cwq->thread,
					     get_cpu_mask(cwq->cpu));
		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @head: insertion point
 *
 * Insert barrier @barr into @cwq before @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * sleep.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head, work_color_to_flags(WORK_NO_COLOR));
}
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

		spin_lock_irq(&cwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&cwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);
	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);
	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
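/*
 * Usage sketch (illustrative only): draining a private workqueue during
 * driver shutdown, before the data its works operate on disappears.  The
 * my_device type and wq member are hypothetical.
 */
#if 0
static void my_shutdown(struct my_device *dev)
{
	/* caller has already ensured that no new works get queued */
	flush_workqueue(dev->wq);	/* wait for everything queued so far */
	destroy_workqueue(dev->wq);	/* flushes again and stops the threads */
}
#endif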
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto already_gone;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);

	spin_unlock_irq(&cwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&cwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
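/*
 * Usage sketch (illustrative only): waiting for one specific work item
 * instead of flushing the whole workqueue; only meaningful when the caller
 * has already prevented the item from being requeued.  Names are
 * hypothetical.
 */
#if 0
static void my_quiesce(struct my_device *dev)
{
	dev->stopping = true;		/* handler checks this and won't re-arm */
	flush_work(&dev->work);		/* returns 0 if it had already finished */
}
#endif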
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(cwq, get_work_color(work));
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}
static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;

	for_each_possible_cpu(cpu)
		wait_on_cpu_work(get_cwq(cpu, wq), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
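/*
 * Usage sketch (illustrative only): typical teardown ordering for a driver
 * whose work item may requeue itself.  Names are hypothetical.
 */
#if 0
static void my_remove(struct my_device *dev)
{
	dev->shutting_down = true;	/* stop the handler from re-arming */
	cancel_work_sync(&dev->work);	/* also waits for a running handler */
	kfree(dev->buffer);		/* now safe: the work can't touch it */
}
#endif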
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
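/*
 * Usage sketch (illustrative only): deferring work from an interrupt handler
 * to the kernel-global keventd workqueue.  Names are hypothetical and
 * <linux/interrupt.h> is assumed to be included.
 */
#if 0
static void my_bh_fn(struct work_struct *work)
{
	/* process context: may sleep, take mutexes, use GFP_KERNEL */
}

static DECLARE_WORK(my_bh_work, my_bh_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	schedule_work(&my_bh_work);	/* safe from atomic context */
	return IRQ_HANDLED;
}
#endif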
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
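/*
 * Usage sketch (illustrative only): a release path that may be entered from
 * either process or interrupt context lets this helper decide whether to run
 * the routine now or defer it.  The execute_work storage must stay valid
 * until a deferred execution finishes, so it lives in the object itself.
 * Names are hypothetical.
 */
#if 0
struct my_object {
	struct execute_work ew;
	/* ... */
};

static void my_release_fn(struct work_struct *work)
{
	struct my_object *obj = container_of(work, struct my_object, ew.work);

	kfree(obj);
}

static void my_object_free(struct my_object *obj)
{
	/* runs my_release_fn() immediately if possible, else via keventd */
	execute_in_process_context(my_release_fn, &obj->ew);
}
#endif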
int keventd_up(void)
{
	return keventd_wq != NULL;
}
int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = get_cwq(cpu, keventd_wq);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *alloc_cwqs(void)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
	struct cpu_workqueue_struct *cwqs;
#ifndef CONFIG_SMP
	void *ptr;

	/*
	 * On UP, percpu allocator doesn't honor alignment parameter
	 * and simply uses arch-dependent default.  Allocate enough
	 * room to align cwq and put an extra pointer at the end
	 * pointing back to the originally allocated pointer which
	 * will be used for free.
	 *
	 * FIXME: This really belongs to UP percpu code.  Update UP
	 * percpu code to honor alignment and remove this ugliness.
	 */
	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
	cwqs = PTR_ALIGN(ptr, align);
	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
#else
	/* On SMP, percpu allocator can do it itself */
	cwqs = __alloc_percpu(size, align);
#endif
	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
	return cwqs;
}
static void free_cwqs(struct cpu_workqueue_struct *cwqs)
{
#ifndef CONFIG_SMP
	/* on UP, the pointer to free is stored right after the cwq */
	free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
#else
	free_percpu(cwqs);
#endif
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	cwq->thread = p;

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						struct lock_class_key *key,
						const char *lock_name)
{
	bool singlethread = flags & WQ_SINGLE_THREAD;
	struct workqueue_struct *wq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_cwqs();
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);
	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	cpu_maps_update_begin();
	/*
	 * We must initialize cwqs for each possible cpu even if we
	 * are going to call destroy_workqueue() finally. Otherwise
	 * cpu_up() can hit the uninitialized cwq once we drop the
	 * lock.
	 */
	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->wq = wq;
		cwq->cpu = cpu;
		cwq->flush_color = -1;
		spin_lock_init(&cwq->lock);
		INIT_LIST_HEAD(&cwq->worklist);
		init_waitqueue_head(&cwq->more_work);

		if (err)
			continue;
		err = create_workqueue_thread(cwq, cpu);
		if (cpu_online(cpu) && !singlethread)
			start_workqueue_thread(cwq, cpu);
		else
			start_workqueue_thread(cwq, -1);
	}

	spin_lock(&workqueue_lock);
	list_add(&wq->list, &workqueues);
	spin_unlock(&workqueue_lock);

	cpu_maps_update_done();

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_cwqs(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
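/*
 * Usage sketch (illustrative only): the create_workqueue() and
 * create_singlethread_workqueue() macros in workqueue.h expand to calls of
 * __create_workqueue_key() above.  Names are hypothetical.
 */
#if 0
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* flushes pending work first */
}
#endif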
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);
	cpu_maps_update_done();

	flush_workqueue(wq);

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		if (cwq->thread) {
			kthread_stop(cwq->thread);
			cwq->thread = NULL;
		}

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
	}

	free_cwqs(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	list_for_each_entry(wq, &workqueues, list) {
		if (wq->flags & WQ_SINGLE_THREAD)
			continue;

		cwq = get_cwq(cpu, wq);

		switch (action) {
		case CPU_POST_DEAD:
			flush_workqueue(wq);
			break;
		}
	}

	return notifier_from_errno(0);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);

	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
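/*
 * Usage sketch (illustrative only): running a function on one specific CPU
 * while keeping that CPU online for the duration.  Names are hypothetical.
 */
#if 0
static long my_read_fn(void *arg)
{
	/* runs in a kthread bound to the requested CPU */
	return 0;
}

static long my_read_on(unsigned int cpu)
{
	long ret;

	get_online_cpus();		/* keep @cpu from going offline */
	ret = work_on_cpu(cpu, my_read_fn, NULL);
	put_online_cpus();

	return ret;
}
#endif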
#endif /* CONFIG_SMP */
void __init init_workqueues(void)
{
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}