/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
        /* pool we belong to */
        struct btrfs_workers *workers;

        /* list of struct btrfs_work that are waiting for service */
        struct list_head pending;

        /* list of worker threads from struct btrfs_workers */
        struct list_head worker_list;

        /* kthread */
        struct task_struct *task;

        /* number of things on the pending list */
        atomic_t num_pending;

        /* incremented each time work is queued here, used for balancing */
        unsigned long sequence;

        /* protects the pending list. */
        spinlock_t lock;

        /* set to non-zero when this thread is already awake and kicking */
        int working;

        /* are we currently idle */
        int idle;
};
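
/*
 * Locking overview: workers->lock (in struct btrfs_workers) protects the
 * pool's worker_list, idle_list and num_workers; each thread's own
 * worker->lock protects only that thread's pending list and working flag.
 * Both are taken irq-safe, which allows work to be queued from interrupt
 * context.
 */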

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
        if (!worker->idle && atomic_read(&worker->num_pending) <
            worker->workers->idle_thresh / 2) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 1;
                list_move(&worker->worker_list, &worker->workers->idle_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
        if (worker->idle && atomic_read(&worker->num_pending) >=
            worker->workers->idle_thresh) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                               &worker->workers->worker_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}
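
/*
 * Note: check_idle_worker and check_busy_worker form a hysteresis band.
 * A thread only becomes idle once its backlog falls below idle_thresh / 2,
 * but becomes busy again as soon as it reaches idle_thresh, so threads
 * don't ping-pong between the two lists.  With the default idle_thresh
 * of 32 that means idle below 16 pending items and busy again at 32.
 */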

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
        struct btrfs_worker_thread *worker = arg;
        struct list_head *cur;
        struct btrfs_work *work;

        do {
                spin_lock_irq(&worker->lock);
                while (!list_empty(&worker->pending)) {
                        cur = worker->pending.next;
                        work = list_entry(cur, struct btrfs_work, list);
                        list_del(&work->list);
                        clear_bit(0, &work->flags);

                        work->worker = worker;
                        spin_unlock_irq(&worker->lock);

                        /* run the work function without holding our lock */
                        work->func(work);

                        atomic_dec(&worker->num_pending);
                        spin_lock_irq(&worker->lock);
                        check_idle_worker(worker);
                }
                worker->working = 0;
                if (freezing(current)) {
                        /* drop the lock before sleeping in the freezer */
                        spin_unlock_irq(&worker->lock);
                        refrigerator();
                } else {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&worker->lock);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
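
/*
 * Note on worker_loop above: worker->lock is dropped around both the
 * work->func() callback and schedule(), so a work function may itself
 * call btrfs_queue_worker or btrfs_requeue_work without deadlocking.
 */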

/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
        struct list_head *cur;
        struct btrfs_worker_thread *worker;

        list_splice_init(&workers->idle_list, &workers->worker_list);
        while (!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
                worker = list_entry(cur, struct btrfs_worker_thread,
                                    worker_list);
                kthread_stop(worker->task);
                list_del(&worker->worker_list);
                kfree(worker);
        }
        return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
        workers->num_workers = 0;
        INIT_LIST_HEAD(&workers->worker_list);
        INIT_LIST_HEAD(&workers->idle_list);
        spin_lock_init(&workers->lock);
        workers->max_workers = max;
        workers->idle_thresh = 32;
        workers->name = name;
}
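
/*
 * Example of bringing up a pool (illustrative only; the real callers
 * live elsewhere in btrfs, e.g. at mount time):
 *
 *      static struct btrfs_workers example_workers;
 *
 *      btrfs_init_workers(&example_workers, "example", 8);
 *      btrfs_start_workers(&example_workers, 1);
 *
 * Further threads, up to the max of 8 passed here, are created on
 * demand by find_worker below.
 */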

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
        struct btrfs_worker_thread *worker;
        int ret = 0;
        int i;

        for (i = 0; i < num_workers; i++) {
                worker = kzalloc(sizeof(*worker), GFP_NOFS);
                if (!worker) {
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&worker->pending);
                INIT_LIST_HEAD(&worker->worker_list);
                spin_lock_init(&worker->lock);
                atomic_set(&worker->num_pending, 0);
                worker->task = kthread_run(worker_loop, worker,
                                           "btrfs-%s-%d", workers->name,
                                           workers->num_workers + i);
                worker->workers = workers;
                if (IS_ERR(worker->task)) {
                        kfree(worker);
                        ret = PTR_ERR(worker->task);
                        goto fail;
                }

                spin_lock_irq(&workers->lock);
                list_add_tail(&worker->worker_list, &workers->idle_list);
                worker->idle = 1;
                workers->num_workers++;
                spin_unlock_irq(&workers->lock);
        }
        return 0;
fail:
        btrfs_stop_workers(workers);
        return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        struct list_head *next;
        int enforce_min = workers->num_workers < workers->max_workers;

        /*
         * if we find an idle thread, don't move it to the end of the
         * idle list.  This improves the chance that the next submission
         * will reuse the same thread, and maybe catch it while it is still
         * idle
         */
        if (!list_empty(&workers->idle_list)) {
                next = workers->idle_list.next;
                worker = list_entry(next, struct btrfs_worker_thread,
                                    worker_list);
                return worker;
        }
        if (enforce_min || list_empty(&workers->worker_list))
                return NULL;

        /*
         * if we pick a busy task, move the task to the end of the list.
         * hopefully this will keep things somewhat evenly balanced
         */
        next = workers->worker_list.next;
        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
        atomic_inc(&worker->num_pending);
        worker->sequence++;
        if (worker->sequence % workers->idle_thresh == 0)
                list_move_tail(next, &workers->worker_list);
        return worker;
}
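
/*
 * Note: enforce_min is true while the pool is still below max_workers.
 * In that state next_worker refuses to pile more work onto an already
 * busy thread and returns NULL instead, which tells find_worker to
 * spawn a fresh thread.
 */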

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;

again:
        spin_lock_irqsave(&workers->lock, flags);
        worker = next_worker(workers);
        spin_unlock_irqrestore(&workers->lock, flags);

        if (!worker) {
                spin_lock_irqsave(&workers->lock, flags);
                if (workers->num_workers >= workers->max_workers) {
                        struct list_head *fallback = NULL;
                        /*
                         * we have failed to find any workers, just
                         * return the first one we can find.
                         */
                        if (!list_empty(&workers->worker_list))
                                fallback = workers->worker_list.next;
                        if (!list_empty(&workers->idle_list))
                                fallback = workers->idle_list.next;
                        BUG_ON(!fallback);
                        worker = list_entry(fallback,
                                            struct btrfs_worker_thread,
                                            worker_list);
                        spin_unlock_irqrestore(&workers->lock, flags);
                } else {
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        btrfs_start_workers(workers, 1);
                        goto again;
                }
        }
        return worker;
}
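
/*
 * Note on find_worker: once the pool is at max_workers and nothing is
 * idle, the fallback path above hands back whichever thread is first on
 * a list rather than failing, so callers are always given a target
 * thread.
 */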

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker = work->worker;
        unsigned long flags;

        if (test_and_set_bit(0, &work->flags))
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        list_add_tail(&work->list, &worker->pending);
        check_busy_worker(worker);
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return 0;
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        int wake = 0;

        /* don't requeue something already on a list */
        if (test_and_set_bit(0, &work->flags))
                goto out;

        worker = find_worker(workers);

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        check_busy_worker(worker);
        list_add_tail(&work->list, &worker->pending);

        /*
         * avoid calling into wake_up_process if this thread has already
         * been kicked
         */
        if (!worker->working)
                wake = 1;
        worker->working = 1;

        spin_unlock_irqrestore(&worker->lock, flags);

        if (wake)
                wake_up_process(worker->task);
out:
        return 0;
}
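
/*
 * Illustrative end-to-end use of this API (hypothetical caller; the
 * real submitters live in the rest of btrfs):
 *
 *      static void example_work_fn(struct btrfs_work *work)
 *      {
 *              do_one_chunk(work);
 *              if (more_chunks_left(work))
 *                      btrfs_requeue_work(work);
 *      }
 *
 *      work->func = example_work_fn;
 *      btrfs_queue_worker(&example_workers, work);
 *
 * do_one_chunk, more_chunks_left and example_workers are placeholders
 * for this sketch, not functions defined by btrfs.
 */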