/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* counts submissions to this worker; used to batch-rotate
	 * busy workers, see next_worker()
	 */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
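/*
 * Locking note: each worker's ->lock protects only that worker's pending
 * list, while the pool-wide workers->lock protects worker_list, idle_list
 * and num_workers.  When both must be held (see btrfs_requeue_work and the
 * check_*_worker helpers), the per-worker lock is taken first.
 */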
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;

	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(0, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			/* run the work function without holding our lock */
			work->func(work);

			atomic_dec(&worker->num_pending);
			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		worker->working = 0;
		if (freezing(current)) {
			/* drop the lock before sleeping in the freezer */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
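/*
 * The loop above pairs with btrfs_queue_worker(): the submitter sets
 * worker->working and calls wake_up_process() only if the thread was not
 * already kicked, and the thread clears ->working and enters
 * TASK_INTERRUPTIBLE only after draining its pending list under
 * worker->lock, so a wakeup cannot be lost between the two sides.
 */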
/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}
/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
}
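/*
 * Typical call sequence (an illustrative sketch, not code from this file;
 * the pool name "demo" and the counts are made up):
 *
 *	struct btrfs_workers workers;
 *
 *	btrfs_init_workers(&workers, "demo", 8);
 *	btrfs_start_workers(&workers, 1);
 *	btrfs_queue_worker(&workers, work);	(work->func set by the caller)
 *	...
 *	btrfs_stop_workers(&workers);
 */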
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		worker->workers = workers;
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}
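/*
 * Note the error path above: if any thread fails to start, the whole pool
 * is torn down via btrfs_stop_workers(), including the threads that were
 * started successfully on earlier iterations.
 */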
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * idle
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;
	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
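/*
 * Worked example of the batching above: with the default idle_thresh of
 * 32, the worker at the head of the busy list receives up to 32
 * consecutive submissions (tracked by ->sequence) before being rotated to
 * the tail, so bursts of related work tend to land on a single thread.
 */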
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find a free worker, so just
			 * force the work onto one of the existing threads
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			worker = list_entry(fallback,
				    struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}
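/*
 * find_worker() retries from the top after starting a new thread rather
 * than using that thread directly: btrfs_start_workers() parks the new
 * worker on the idle list, where next_worker() will pick it up.
 */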
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(0, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		/*
		 * irqs are already off here; a plain spin_lock avoids
		 * clobbering the flags saved by the outer irqsave
		 */
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}

	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
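/*
 * btrfs_requeue_work() relies on work->worker, which is only assigned in
 * worker_loop(), so it is only meaningful for a work item that has already
 * been run at least once on a worker thread.
 */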
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(0, &work->flags))
		goto out;

	worker = find_worker(workers);

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
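/*
 * Sketch of a caller-defined work function (illustrative only; the names
 * my_work_func and struct my_ctx are made up, not part of this file):
 *
 *	static void my_work_func(struct btrfs_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		do one slice of ctx's work here, then if more remains call
 *		btrfs_requeue_work(work) to give the cpu up for others.
 *	}
 */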