/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        void *data;
        struct completion parked;
        struct completion exited;
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
        KTHREAD_IS_PARKED,
};

#define to_kthread(tsk) \
        container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
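
/*
 * Example (illustrative sketch, not compiled here): a thread function
 * built around kthread_should_stop(). my_thread_fn() and my_do_work()
 * are hypothetical names; the return value is what kthread_stop()
 * hands back to the stopper.
 *
 *      static int my_thread_fn(void *data)
 *      {
 *              while (!kthread_should_stop()) {
 *                      my_do_work(data);
 *                      schedule_timeout_interruptible(HZ);
 *              }
 *              return 0;
 *      }
 */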

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
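
/*
 * Example (illustrative sketch, not compiled here): the main loop of a
 * freezable kthread. set_freezable() must be called first, since
 * kthreads are created PF_NOFREEZE; my_do_work() is a hypothetical name.
 *
 *      static int my_freezable_thread_fn(void *data)
 *      {
 *              set_freezable();
 *              while (!kthread_freezable_should_stop(NULL)) {
 *                      my_do_work(data);
 *                      schedule_timeout_interruptible(HZ);
 *              }
 *              return 0;
 *      }
 */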

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

static void __kthread_parkme(struct kthread *self)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
                __set_current_state(TASK_INTERRUPTIBLE);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
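
/*
 * Example (illustrative sketch, not compiled here): a thread function
 * that honours both stopping and parking. my_do_work() is a
 * hypothetical name.
 *
 *      static int my_parkable_thread_fn(void *data)
 *      {
 *              while (!kthread_should_stop()) {
 *                      if (kthread_should_park())
 *                              kthread_parkme();
 *                      else
 *                              my_do_work(data);
 *              }
 *              return 0;
 *      }
 */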

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct kthread self;
        int ret;

        self.flags = 0;
        self.data = data;
        init_completion(&self.exited);
        init_completion(&self.parked);
        current->vfork_done = &self.exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(&create->done);
        schedule();

        ret = -EINTR;

        if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
                __kthread_parkme(&self);
                ret = threadfn(data);
        }
        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                create->result = ERR_PTR(pid);
                complete(&create->done);
        }
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node to get NUMA affinity for the kthread stack; otherwise give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        struct kthread_create_info create;

        create.threadfn = threadfn;
        create.data = data;
        create.node = node;
        init_completion(&create.done);

        spin_lock(&kthread_create_lock);
        list_add_tail(&create.list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        wait_for_completion(&create.done);

        if (!IS_ERR(create.result)) {
                static const struct sched_param param = { .sched_priority = 0 };
                va_list args;

                va_start(args, namefmt);
                vsnprintf(create.result->comm, sizeof(create.result->comm),
                          namefmt, args);
                va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(create.result, cpu_all_mask);
        }
        return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
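
/*
 * Example (illustrative sketch, not compiled here): creating, naming
 * and starting a thread. my_thread_fn and my_dev are hypothetical.
 *
 *      struct task_struct *tsk;
 *
 *      tsk = kthread_create_on_node(my_thread_fn, my_dev, -1, "mydrv/%d", 0);
 *      if (IS_ERR(tsk))
 *              return PTR_ERR(tsk);
 *      wake_up_process(tsk);
 *
 * or, equivalently, via the kthread_run() wrapper from <linux/kthread.h>,
 * which does the wake_up_process() for you:
 *
 *      tsk = kthread_run(my_thread_fn, my_dev, "mydrv/%d", 0);
 */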

static void __kthread_bind(struct task_struct *p, unsigned int cpu)
{
        /* It's safe because the task is inactive. */
        do_set_cpus_allowed(p, cpumask_of(cpu));
        p->flags |= PF_THREAD_BOUND;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        /* Must have done schedule() in kthread() before we set_task_cpu */
        if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
                WARN_ON(1);
                return;
        }
        __kthread_bind(p, cpu);
}
EXPORT_SYMBOL(kthread_bind);
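
/*
 * Example (illustrative sketch, not compiled here): bind a freshly
 * created, still-stopped thread to CPU 2 before letting it run.
 * kthread_create() is the node -1 wrapper from <linux/kthread.h>;
 * my_thread_fn and my_data are hypothetical.
 *
 *      tsk = kthread_create(my_thread_fn, my_data, "mydrv/%u", 2);
 *      if (!IS_ERR(tsk)) {
 *              kthread_bind(tsk, 2);
 *              wake_up_process(tsk);
 *      }
 */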

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: the cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *           to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
        kthread_park(p);
        return p;
}

static struct kthread *task_get_live_kthread(struct task_struct *k)
{
        struct kthread *kthread;

        get_task_struct(k);
        kthread = to_kthread(k);
        /* It might have exited */
        barrier();
        if (k->vfork_done != NULL)
                return kthread;
        return NULL;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it so
 * that it leaves __kthread_parkme(). If the thread is marked percpu
 * then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = task_get_live_kthread(k);

        if (kthread) {
                clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                /*
                 * We clear the IS_PARKED bit here as we don't wait
                 * until the task has left the park code. So if we'd
                 * park before that happens we'd see the IS_PARKED bit
                 * which might be about to be cleared.
                 */
                if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                        if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                                __kthread_bind(k, kthread->cpu);
                        wake_up_process(k);
                }
        }
        put_task_struct(k);
}
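
/*
 * Example (illustrative sketch, not compiled here): the life cycle of a
 * per-cpu thread created through kthread_create_on_cpu(). The thread
 * comes back parked, so it must be unparked before it runs threadfn();
 * my_parkable_thread_fn is the hypothetical park-aware function from
 * the sketch above.
 *
 *      tsk = kthread_create_on_cpu(my_parkable_thread_fn, NULL, cpu,
 *                                  "mydrv/%u");
 *      if (!IS_ERR(tsk))
 *              kthread_unpark(tsk);
 *
 * kthread_park() can later take the thread out of service (e.g. across
 * cpu hotplug) and kthread_unpark() puts it back.
 */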

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = task_get_live_kthread(k);
        int ret = -ENOSYS;

        if (kthread) {
                if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                        if (k != current) {
                                wake_up_process(k);
                                wait_for_completion(&kthread->parked);
                        }
                }
                ret = 0;
        }
        put_task_struct(k);
        return ret;
}

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread = task_get_live_kthread(k);
        int ret;

        trace_sched_kthread_stop(k);
        if (kthread) {
                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
                clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
        ret = k->exit_code;

        put_task_struct(k);
        trace_sched_kthread_stop_ret(ret);

        return ret;
}
EXPORT_SYMBOL(kthread_stop);
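
/*
 * Example (illustrative sketch, not compiled here): shutting down the
 * thread started in the earlier sketches. The caller must still hold a
 * valid reference to tsk; ret is whatever my_thread_fn() returned, or
 * -EINTR if the thread was never woken.
 *
 *      int ret = kthread_stop(tsk);
 */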

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_HIGH_MEMORY]);

        current->flags |= PF_NOFREEZE;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker. The started kthread will process work_list until
 * it is stopped with kthread_stop(). A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time. A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        WARN_ON(worker->task);
        worker->task = current;
repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
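
/*
 * Example (illustrative sketch, not compiled here): setting up a
 * dedicated worker thread. DEFINE_KTHREAD_WORKER() comes from
 * <linux/kthread.h>; my_worker is a hypothetical name.
 *
 *      static DEFINE_KTHREAD_WORKER(my_worker);
 *      struct task_struct *tsk;
 *
 *      tsk = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *      if (IS_ERR(tsk))
 *              return PTR_ERR(tsk);
 */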

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        lockdep_assert_held(&worker->lock);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution by @worker. @worker must have been
 * initialized with init_kthread_worker(). Returns %true if @work was
 * successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (list_empty(&work->node)) {
                insert_kthread_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
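
/*
 * Example (illustrative sketch, not compiled here): defining a work
 * item and queueing it on the worker from the previous sketch.
 * DEFINE_KTHREAD_WORK() comes from <linux/kthread.h>; my_work_fn is a
 * hypothetical callback.
 *
 *      static void my_work_fn(struct kthread_work *work)
 *      {
 *              pr_info("running on the worker thread\n");
 *      }
 *
 *      static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *
 *      queue_kthread_work(&my_worker, &my_work);
 */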

struct kthread_flush_work {
        struct kthread_work work;
        struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

retry:
        worker = work->worker;
        if (!worker)
                return;

        spin_lock_irq(&worker->lock);
        if (work->worker != worker) {
                spin_unlock_irq(&worker->lock);
                goto retry;
        }

        if (!list_empty(&work->node))
                insert_kthread_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                insert_kthread_work(worker, &fwork.work, worker->work_list.next);
        else
                noop = true;

        spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        queue_kthread_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
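
/*
 * Example (illustrative sketch, not compiled here): tearing down the
 * worker from the earlier sketches. Flush the queue first, then stop
 * the thread running kthread_worker_fn().
 *
 *      flush_kthread_worker(&my_worker);
 *      kthread_stop(tsk);
 */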