/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

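/*
 * Example (illustrative, not part of this file): a minimal thread function
 * that loops until kthread_stop() is called.  my_dev and my_dev_poll() are
 * hypothetical driver names; the return value is what kthread_stop() will
 * hand back to the stopper.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		while (!kthread_should_stop()) {
 *			my_dev_poll(dev);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
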
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

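/*
 * Example (illustrative; do_one_unit_of_work() is a hypothetical name): a
 * thread function that honours both stop and park requests.
 * kthread_parkme() blocks in TASK_PARKED until kthread_unpark() is called.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_one_unit_of_work(data);
 *		}
 *		return 0;
 *	}
 */
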
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from the
 * kthread_stop() / freezer deadlock, and freezable kthreads should use it
 * instead of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

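/*
 * Example (illustrative; reinit_hw() and do_work() are hypothetical): a
 * freezable loop.  The thread must mark itself freezable first, and
 * @was_frozen lets it redo any state that went stale across a
 * suspend/resume cycle.
 *
 *	static int my_freezable_thread_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				reinit_hw(data);
 *			do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
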
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}

static int kthread(void *_create)
{
	/* Copy data: it lives on the creator's stack and may go away
	 * once we complete create->done. */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, pass that cpu's
 * memory node in @node to get NUMA affinity for the kthread stack;
 * otherwise pass -1.  When woken, the thread will run @threadfn() with
 * @data as its argument.  @threadfn() can either call do_exit() directly
 * if it is a standalone thread for which no one will call kthread_stop(),
 * or return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

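/*
 * Example (illustrative; my_thread_fn, dev, node and id are hypothetical):
 * create a thread with a NUMA-local stack and start it explicitly.  The
 * kthread_run() macro in <linux/kthread.h> combines the same two steps.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, dev, node, "mydrv/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */
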
static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

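/*
 * Example (illustrative): bind a freshly created, still-stopped thread to
 * a cpu before the first wakeup.  kthread_create() is the wrapper around
 * kthread_create_on_node() with @node = -1.
 *
 *	tsk = kthread_create(my_thread_fn, data, "mydrv/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */
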
/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: the cpu to which the thread should be bound.
 * @namefmt: printf-style name for the thread.  The format is restricted
 *	     to "name.*%u"; the code fills in the cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}

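/*
 * Example (illustrative, smpboot-style usage; my_percpu_fn is
 * hypothetical): the returned thread is already parked, so unpark it when
 * the cpu comes online.
 *
 *	tsk = kthread_create_on_cpu(my_percpu_fn, data, cpu, "mydrv/%u");
 *	if (!IS_ERR(tsk))
 *		kthread_unpark(tsk);
 */
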
static struct kthread *task_get_live_kthread(struct task_struct *k)
{
	struct kthread *kthread;

	get_task_struct(k);
	kthread = to_kthread(k);
	/* It might have exited */
	barrier();
	if (k->vfork_done != NULL)
		return kthread;
	return NULL;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code.  So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it so
 * that it leaves the park position.  If the thread is marked percpu
 * then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
	put_task_struct(k);
}

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	put_task_struct(k);
	return ret;
}

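/*
 * Example (illustrative; my_dev_reconfigure() is hypothetical): park a
 * thread around a reconfiguration so it cannot touch the device meanwhile.
 *
 *	if (!kthread_park(tsk)) {
 *		my_dev_reconfigure(dev);
 *		kthread_unpark(tsk);
 *	}
 */
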
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);
	int ret;

	trace_sched_kthread_stop(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

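/*
 * Example (illustrative): tear a thread down and collect its exit code.
 * A thread that was created but never woken exits cleanly too and yields
 * -EINTR here.
 *
 *	int ret = kthread_stop(tsk);
 *	if (ret && ret != -EINTR)
 *		pr_warn("worker thread exited with %d\n", ret);
 */
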
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

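/*
 * Example (illustrative; "my_worker" is a hypothetical name): attach a
 * kthread to a worker.  DEFINE_KTHREAD_WORKER() from <linux/kthread.h>
 * provides a statically initialized worker.
 *
 *	static DEFINE_KTHREAD_WORKER(my_worker);
 *	struct task_struct *wtask;
 *
 *	wtask = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(wtask))
 *		return PTR_ERR(wtask);
 */
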
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker.  @worker must have been
 * initialized with init_kthread_worker() and be processed by a kthread
 * running kthread_worker_fn().  Returns %true if @work was successfully
 * queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

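/*
 * Example (illustrative; my_work_fn, struct my_ctx and process() are
 * hypothetical): initialize and queue a work item on the worker set up
 * above.  The callback receives the kthread_work pointer and typically
 * container_of()s back to its own state.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		process(ctx);
 *	}
 *
 *	init_kthread_work(&ctx->work, my_work_fn);
 *	queue_kthread_work(&my_worker, &ctx->work);
 */
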
struct kthread_flush_work {
	struct kthread_work work;
	struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

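/*
 * Example (illustrative, continuing the hypothetical ctx above, assumed to
 * be heap-allocated): wait for one specific item to finish before freeing
 * the state it uses.
 *
 *	flush_kthread_work(&ctx->work);
 *	kfree(ctx);
 */
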
/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
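
/*
 * Example (illustrative): full worker teardown.  Drain everything queued
 * so far, then stop the kthread that was attached with kthread_run().
 *
 *	flush_kthread_worker(&my_worker);
 *	kthread_stop(wtask);
 */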