workqueue: implement alloc_ordered_workqueue()
include/linux/workqueue.h
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is "no color", used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 8 bits off of the cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

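/*
 * Illustrative sketch (not part of this header): a delayed work
 * handler can recover its delayed_work, and from there its enclosing
 * object, via to_delayed_work() + container_of().  "struct my_dev",
 * its "dwork" member and "my_timeout_fn" are hypothetical names.
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev, dwork);
 *
 *		handle the timeout for dev here
 *	}
 */
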
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry = { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

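/*
 * Illustrative sketch (not part of this header): declaring and using
 * a statically initialized work item.  "my_work" and "my_handler"
 * are hypothetical names.
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "my_work executed\n");
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	then, e.g. from interrupt context:
 *	schedule_work(&my_work);
 */
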
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		static struct lock_class_key __key;		\
								\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ON_STACK(_work, _func)			\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
	do {							\
		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)

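/*
 * Illustrative sketch (not part of this header): run-time
 * initialization of a work item embedded in a dynamically allocated
 * object.  "struct my_dev" and "my_dev_reset" are hypothetical.
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void my_dev_reset(struct work_struct *work)
 *	{
 *		struct my_dev *dev =
 *			container_of(work, struct my_dev, reset_work);
 *		reset dev here
 *	}
 *
 *	in the probe/init path:
 *	INIT_WORK(&dev->reset_work, my_dev_reset);
 */
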
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

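/*
 * Illustrative sketch (not part of this header): an opportunistic
 * pending check before re-arming.  Note that the queueing functions
 * already test and set the PENDING bit atomically, so this is only
 * an optimization, not a synchronization mechanism.  "my_dwork" is
 * hypothetical.
 *
 *	if (!delayed_work_pending(&my_dwork))
 *		queue_delayed_work(system_wq, &my_dwork, HZ);
 */
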
/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
	WQ_RESCUER		= 1 << 3, /* has a rescue worker */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE \
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue work items which can run for
 * too long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * work items.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound
 * to any specific CPU, not concurrency managed, and all queued work
 * items are executed immediately as long as the max_active limit is
 * not reached and resources are available.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;

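/*
 * Illustrative sketch (not part of this header): picking a system
 * workqueue.  "my_work" is assumed to be an initialized work item.
 *
 *	queue_work(system_wq, &my_work);	short-running work
 *	queue_work(system_long_wq, &my_work);	may run for a long time
 *	queue_work(system_nrt_wq, &my_work);	must never run concurrently
 */
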
extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
		      struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(name, flags, max_active)		\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__alloc_workqueue_key((name), (flags), (max_active),	\
			      &__key, __lock_name);		\
})
#else
#define alloc_workqueue(name, flags, max_active)		\
	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif

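/*
 * Illustrative sketch (not part of this header): allocating a
 * dedicated workqueue.  A max_active of 0 selects the default
 * (WQ_DFL_ACTIVE); the name and flag choice are hypothetical.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_HIGHPRI, 0);
 *	if (!wq)
 *		return -ENOMEM;
 */
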
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @name: name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_RESCUER are meaningful)
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name, unsigned int flags)
{
	return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
}

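/*
 * Illustrative sketch (not part of this header): an ordered workqueue
 * executes work items one at a time, strictly in queueing order.
 * "my_ordered", work_a and work_b are hypothetical names.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("my_ordered", WQ_FREEZEABLE);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	queue_work(wq, &work_a);	runs first
 *	queue_work(wq, &work_b);	runs only after work_a finishes
 *
 *	destroy_workqueue(wq);
 */
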
#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_RESCUER, 1)
#define create_freezeable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			      struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

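/*
 * Illustrative sketch (not part of this header): queueing with a
 * delay and waiting for completion.  "my_dwork" is assumed to have
 * been set up with INIT_DELAYED_WORK(); the 100ms value is arbitrary.
 *
 *	queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
 *	...
 *	flush_delayed_work(&my_dwork);	wait until it has finished
 */
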
extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
				    unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);
extern int cancel_work_sync(struct work_struct *work);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);

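/*
 * Illustrative sketch (not part of this header): tearing down a
 * possibly self re-arming delayed work.  Unlike cancel_delayed_work(),
 * cancel_delayed_work_sync() also waits for a running handler and
 * prevents it from re-arming.  "my_dwork" is hypothetical.
 *
 *	in the remove/exit path:
 *	cancel_delayed_work_sync(&my_dwork);
 */
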
/* Obsolete.  Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete.  Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

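/*
 * Illustrative sketch (not part of this header): running a function
 * synchronously on a specific CPU; work_on_cpu() waits for fn and
 * returns its return value.  "my_percpu_fn" is hypothetical.
 *
 *	static long my_percpu_fn(void *arg)
 *	{
 *		executed on the target cpu
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(2, my_percpu_fn, NULL);
 */
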
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_LOCKDEP
int in_workqueue_context(struct workqueue_struct *wq);
#endif

#endif	/* _LINUX_WORKQUEUE_H */