/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

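/*
 * These bits live in btrfs_work->flags and track an item's lifecycle:
 * QUEUED is set while the item sits on a pending list, DONE once
 * work->func has run, and ORDER_DONE once the ordered completion hooks
 * have run.  HIGH_PRIO routes the item to the priority lists instead
 * of the regular ones.
 */
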
/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending lists */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	/* used by next_worker() to batch submissions onto one thread */
	unsigned long sequence;

	/* protects the pending lists */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

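/*
 * Two locks are in play throughout this file: workers->lock protects
 * the pool-wide lists (worker_list, idle_list) and counters, while each
 * worker->lock protects that thread's pending lists and ->working flag.
 * Where both are needed, worker->lock is taken first.
 */
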
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

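/*
 * Note the hysteresis between the two helpers above: a worker only
 * becomes idle when its backlog drops below idle_thresh / 2, but only
 * becomes busy again once the backlog reaches idle_thresh, so a worker
 * hovering near the threshold does not bounce between the two lists.
 */
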
/*
 * checks for a pending request to start a new worker.  Queues that are
 * not allowed to spawn kthreads directly (workers->atomic_worker_start)
 * just set atomic_start_pending in find_worker(); running workers pick
 * that flag up here and do the btrfs_start_workers() call for them.
 */
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers >= workers->max_workers)
		goto out;

	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_start_workers(workers, 1);
	return;

out:
	spin_unlock_irqrestore(&workers->lock, flags);
}

static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}

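/*
 * The loop above is what gives ordered queues their guarantee:
 * work->func may finish in any order across threads, but ordered_func
 * and ordered_free run strictly in submission order, because each item
 * stays on the order list as a barrier until everything ahead of it
 * has completed.
 */
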
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	/*
	 * the outer lock already disabled irqs; a plain spin_lock here
	 * keeps the inner unlock from re-enabling them too early
	 */
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending)) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
again_locked:
		while (1) {
			if (!list_empty(&worker->prio_pending))
				cur = worker->prio_pending.next;
			else if (!list_empty(&worker->pending))
				cur = worker->pending.next;
			else
				break;

			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work?  sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					goto again_locked;

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

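/*
 * The idle dance above trades a little latency for fewer
 * wake_up_process() calls: worker->working stays set across the short
 * schedule_timeout(1), so a submitter that races in can just append to
 * the pending list without waking anybody.  Only after that grace
 * period does the worker clear ->working and sleep for real, for up to
 * 120 seconds before offering itself for shutdown.
 */
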
/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = 0;
}

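/*
 * Typical non-ordered use of this API, as a sketch.  The "demo" pool
 * name, the thread counts and my_func() below are illustrative
 * assumptions, not btrfs code:
 *
 *	struct btrfs_workers workers;
 *	struct btrfs_work *work = kzalloc(sizeof(*work), GFP_NOFS);
 *
 *	btrfs_init_workers(&workers, "demo", 8);  // at most 8 threads
 *	btrfs_start_workers(&workers, 1);         // spawn the first one
 *	work->func = my_func;                     // runs on a worker kthread
 *	btrfs_queue_worker(&workers, work);       // find_worker picks a thread
 *	...
 *	btrfs_stop_workers(&workers);
 */
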
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);

		atomic_set(&worker->num_pending, 0);
		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}
		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

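/*
 * A quick example of the batching above with the default
 * idle_thresh of 32: every 32nd pick of a busy worker rotates it to
 * the tail of the list, so up to 32 jobs submitted back to back tend
 * to land on the same thread before the next busy worker takes over.
 */
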
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers >= workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

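/*
 * Note how the atomic_worker_start case above pairs with
 * check_pending_worker_creates(): callers that must not spawn a
 * kthread here fall back to an existing worker and leave
 * atomic_start_pending set, and a running worker starts the extra
 * thread later from process context.
 */
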
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}

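/*
 * A long-running work function might use the requeue path like this
 * (my_work, my_state, do_one_chunk and my_finish are hypothetical,
 * not btrfs code):
 *
 *	static void my_work(struct btrfs_work *work)
 *	{
 *		struct my_state *s = container_of(work, struct my_state,
 *						  work);
 *
 *		if (!do_one_chunk(s)) {
 *			btrfs_requeue_work(work);  // run again later
 *			return;
 *		}
 *		my_finish(s);
 *	}
 */
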
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
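
/*
 * Ordered-queue sketch (illustrative assumptions again: the "demo"
 * name and the my_* callbacks are not btrfs code).  With ordered set,
 * work->func may finish in any order, but ordered_func/ordered_free
 * run strictly in submission order:
 *
 *	struct btrfs_workers workers;
 *
 *	btrfs_init_workers(&workers, "demo", 4);
 *	workers.ordered = 1;
 *	btrfs_start_workers(&workers, 1);
 *
 *	work->func = my_func;                  // may complete out of order
 *	work->ordered_func = my_ordered_func;  // runs in submission order
 *	work->ordered_free = my_ordered_free;  // frees the item, in order
 *	btrfs_queue_worker(&workers, work);
 */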