Btrfs: Add backport for the kthread work on kernels older than 2.6.20
fs/btrfs/async-thread.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
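
/*
 * Backport note: linux/freezer.h only exists on 2.6.20 and newer kernels;
 * on older kernels freezing() and refrigerator() are still declared in
 * linux/sched.h, so pick the right header based on LINUX_VERSION_CODE.
 */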
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* protects the pending list */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;
};

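/*
 * The work items themselves are struct btrfs_work (declared in
 * async-thread.h).  The code in this file only touches these members:
 *
 *	func	callback run by the worker thread
 *	flags	bit 0 is set while the item sits on a pending list
 *	worker	back pointer to the thread servicing the item
 *	list	links the item into worker->pending
 */
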
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(0, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			spin_lock_irq(&worker->lock);
		}
		worker->working = 0;
		if (freezing(current)) {
			/* drop the lock before sleeping in the refrigerator */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
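
/*
 * Note on the wakeup protocol: the ->working flag and the
 * TASK_INTERRUPTIBLE/schedule() pair above form the sleep/wake handshake
 * with btrfs_queue_worker().  A submitter only calls wake_up_process()
 * when it sees ->working == 0, and the worker clears ->working while still
 * holding ->lock before it decides to sleep, so a work item queued while
 * the loop is draining the list is never left waiting for a wakeup.
 */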

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	workers->last = NULL;
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		worker->task = kthread_run(worker_loop, worker, "btrfs");
		if (IS_ERR(worker->task)) {
			/*
			 * free this worker here: it was never added to the
			 * list, so btrfs_stop_workers() cannot reach it
			 */
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->worker_list);
		workers->last = worker;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}
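
/*
 * Example (illustrative only, not part of the original commit): a typical
 * setup/teardown sequence for a pool.  The name "pool" and the thread
 * counts are made up for the example.
 *
 *	struct btrfs_workers pool;
 *	int ret;
 *
 *	btrfs_init_workers(&pool, 8);		allow up to 8 threads
 *	ret = btrfs_start_workers(&pool, 2);	spawn 2 of them up front
 *	if (ret)
 *		return ret;			start already cleaned up
 *	...
 *	btrfs_stop_workers(&pool);		stops and frees every thread
 */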

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	struct list_head *start;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * start with the last thread if it isn't busy; fewer than 64
	 * pending items counts as not busy here
	 */
	worker = workers->last;
	if (atomic_read(&worker->num_pending) < 64)
		goto done;

	next = worker->worker_list.next;
	start = &worker->worker_list;

	/*
	 * check all the workers for someone that is bored.  FIXME, do
	 * something smart here
	 */
	while (next != start) {
		if (next == &workers->worker_list) {
			next = workers->worker_list.next;
			continue;
		}
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		if (atomic_read(&worker->num_pending) < 64 || !enforce_min)
			goto done;
		next = next->next;
	}
	/*
	 * nobody was bored, if we're already at the max thread count,
	 * use the last thread
	 */
	if (!enforce_min || atomic_read(&workers->last->num_pending) < 64)
		return workers->last;

	return NULL;
done:
	workers->last = worker;
	return worker;
}

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			/*
			 * we have failed to find any idle workers, just
			 * return the first one
			 */
			worker = list_entry(workers->worker_list.next,
				struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(0, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}
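
/*
 * Example (illustrative only): a long running work function that processes
 * a bounded batch and then puts itself back on the queue so other items get
 * a turn.  "my_state", "work" as an embedded member, and process_one_chunk()
 * are made-up names for the example.
 *
 *	static void checksum_worker(struct btrfs_work *work)
 *	{
 *		struct my_state *state = container_of(work, struct my_state,
 *						      work);
 *
 *		if (process_one_chunk(state) > 0) {
 *			more left to do: give the cpu back and requeue
 *			btrfs_requeue_work(work);
 *		}
 *	}
 */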

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(0, &work->flags))
		goto out;

	worker = find_worker(workers);

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
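
/*
 * Example (illustrative only): submitting a work item to the pool.  The
 * caller embeds a struct btrfs_work in its own context and fills in ->func
 * before queueing; the remaining fields are managed by the code above.  The
 * names "my_job", "pool" and my_job_func() are made up for the example.
 *
 *	struct my_job {
 *		struct btrfs_work work;
 *		...
 *	};
 *
 *	static void my_job_func(struct btrfs_work *work)
 *	{
 *		struct my_job *job = container_of(work, struct my_job, work);
 *		...
 *	}
 *
 *	job->work.func = my_job_func;
 *	job->work.flags = 0;
 *	btrfs_queue_worker(&pool, &job->work);
 */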