/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
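
/*
 * flag bits in btrfs_work->flags: QUEUED is set while an item waits on
 * a worker's pending list, DONE once its func has run, and ORDER_DONE
 * once its ordered completion function has been started.
 */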
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* counter used by next_worker to batch requests onto one thread */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
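
/*
 * each worker lives on either workers->idle_list or the busy
 * workers->worker_list.  The two helpers below move a worker between
 * the lists based on num_pending and the pool's idle_thresh.
 */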

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
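
/*
 * for ordered work queues, run the ordered completion functions of any
 * finished items at the head of the order list.  An unfinished item
 * stays on the list as a barrier, so ordered_func and ordered_free are
 * always called in the order the items were queued.
 */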
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	unsigned long flags;

	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock_irqsave(&workers->lock, flags);

	while (!list_empty(&workers->order_list)) {
		work = list_entry(workers->order_list.next,
				  struct btrfs_work, order_list);

		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock_irqrestore(&workers->lock, flags);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock_irqsave(&workers->lock, flags);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock_irqrestore(&workers->lock, flags);
	return 0;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		worker->working = 0;
		if (freezing(current)) {
			/* drop the lock before freezing, the top of
			 * the loop takes it again
			 */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
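			/* a kthread_stop() may have raced in while we
			 * were emptying the list; check before sleeping
			 * so the stop request isn't missed
			 */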
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		/* wakes the thread and waits for worker_loop to return */
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	/* default threshold for moving threads between idle and busy */
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		/* fill in the pool pointer before the thread starts */
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			kfree(worker);
			ret = PTR_ERR(worker->task);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just
 * return one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find any workers, just
			 * return the first one we can find.
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
					    struct btrfs_worker_thread,
					    worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		/*
		 * irqs are already off here, a plain spin_lock is enough.
		 * Reusing 'flags' for a nested irqsave would clobber the
		 * state saved by the outer lock.
		 */
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}

	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		spin_lock_irqsave(&workers->lock, flags);
		list_add_tail(&work->order_list, &workers->order_list);
		spin_unlock_irqrestore(&workers->lock, flags);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
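
/*
 * Example usage (an illustrative sketch only, not part of this file;
 * my_func and my_pool are hypothetical names).  A caller initializes a
 * pool, starts a thread, queues work items whose func callback later
 * frees them, and finally tears the pool down:
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		do_deferred_thing(work);	(hypothetical)
 *		kfree(work);
 *	}
 *
 *	struct btrfs_workers my_pool;
 *	struct btrfs_work *work;
 *
 *	btrfs_init_workers(&my_pool, "example", 8);
 *	btrfs_start_workers(&my_pool, 1);
 *
 *	work = kzalloc(sizeof(*work), GFP_NOFS);
 *	work->func = my_func;
 *	btrfs_queue_worker(&my_pool, work);
 *
 *	btrfs_stop_workers(&my_pool);
 */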