kernel/kthread.c
/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion *done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        void *data;
        struct completion parked;
        struct completion exited;
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
        KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork) \
        container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
        return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
        struct completion *vfork = ACCESS_ONCE(k->vfork_done);
        if (likely(vfork) && try_get_task_stack(k))
                return __to_kthread(vfork);
        return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
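
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the canonical loop around kthread_should_stop(). The function name and
 * the sleep policy are hypothetical.
 */
static int example_loop_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* ... do one unit of work ... */
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        }
        /* This value is handed back to the kthread_stop() caller. */
        return 0;
}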

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
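
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a loop that honours park requests as well as stop requests. The name
 * example_parking_fn is hypothetical.
 */
static int example_parking_fn(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park())
                        kthread_parkme();       /* sleeps here until unparked */
                /* ... do work that is only safe while unparked ... */
                cond_resched();
        }
        return 0;
}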

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
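
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the freezable variant of the loop above. Kernel threads are not
 * freezable by default, hence the set_freezable() call; the names are
 * hypothetical.
 */
static int example_freezable_fn(void *data)
{
        bool was_frozen;

        set_freezable();
        while (!kthread_freezable_should_stop(&was_frozen)) {
                /* ... was_frozen says whether we just left the refrigerator ... */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }
        return 0;
}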

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

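/*
 * Example (editor's illustrative sketch, not part of the original file):
 * inside a kthread, kthread_data(current) recovers the pointer that was
 * handed to kthread_create(); the function name is hypothetical.
 */
static int example_data_fn(void *data)
{
        void *ctx = kthread_data(current);

        WARN_ON(ctx != data);   /* both name the creation-time pointer */
        return 0;
}
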
/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task. Return the data value specified when it
 * was created if accessible. If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned. This function requires
 * that @task itself is safe to dereference.
 */
void *probe_kthread_data(struct task_struct *task)
{
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;

        probe_kernel_read(&data, &kthread->data, sizeof(data));
        return data;
}

static void __kthread_parkme(struct kthread *self)
{
        __set_current_state(TASK_PARKED);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
                __set_current_state(TASK_PARKED);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
        struct kthread self;
        int ret;

        self.flags = 0;
        self.data = data;
        init_completion(&self.exited);
        init_completion(&self.parked);
        current->vfork_done = &self.exited;

        /* If user was SIGKILLed, I release the structure. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create);
                do_exit(-EINTR);
        }
        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(done);
        schedule();

        ret = -EINTR;

        if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
                cgroup_kthread_ready();
                __kthread_parkme(&self);
                ret = threadfn(data);
        }
        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* If user was SIGKILLed, I release the structure. */
                struct completion *done = xchg(&create->done, NULL);

                if (!done) {
                        kfree(create);
                        return;
                }
                create->result = ERR_PTR(pid);
                complete(done);
        }
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
        struct kthread_create_info *create = kmalloc(sizeof(*create),
                                                     GFP_KERNEL);

        if (!create)
                return ERR_PTR(-ENOMEM);
        create->threadfn = threadfn;
        create->data = data;
        create->node = node;
        create->done = &done;

        spin_lock(&kthread_create_lock);
        list_add_tail(&create->list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        /*
         * Wait for completion in killable state, for I might be chosen by
         * the OOM killer while kthreadd is trying to allocate memory for
         * new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
                 * If I was SIGKILLed before kthreadd (or new kernel thread)
                 * calls complete(), leave the cleanup of this structure to
                 * that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
                /*
                 * kthreadd (or new kernel thread) will call complete()
                 * shortly.
                 */
                wait_for_completion(&done);
        }
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
                va_list args;

                va_start(args, namefmt);
                vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
                va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(task, cpu_all_mask);
        }
        kfree(create);
        return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
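
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * create, name and start a thread; example_loop_fn is the hypothetical
 * thread function sketched earlier.
 */
static struct task_struct *example_start_thread(void *arg)
{
        struct task_struct *tsk;

        tsk = kthread_create_on_node(example_loop_fn, arg, NUMA_NO_NODE,
                                     "example/%d", 0);
        if (!IS_ERR(tsk))
                wake_up_process(tsk);   /* the new thread starts out stopped */
        return tsk;
}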

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
        unsigned long flags;

        if (!wait_task_inactive(p, state)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        do_set_cpus_allowed(p, mask);
        p->flags |= PF_NO_SETAFFINITY;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
        __kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
        __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
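
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * bind a just-created thread to CPU 0 before its first wakeup; the names
 * are hypothetical.
 */
static struct task_struct *example_bound_thread(void *arg)
{
        struct task_struct *tsk;

        tsk = kthread_create(example_loop_fn, arg, "example_bound");
        if (!IS_ERR(tsk)) {
                kthread_bind(tsk, 0);   /* legal only while still stopped */
                wake_up_process(tsk);
        }
        return tsk;
}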

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *           to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
        kthread_park(p);
        return p;
}

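/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a per-cpu thread comes back parked, so unpark it when ready; the names
 * are hypothetical and the namefmt carries the required %u.
 */
static struct task_struct *example_percpu_thread(unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = kthread_create_on_cpu(example_parking_fn, NULL, cpu,
                                    "example_pcpu/%u");
        if (!IS_ERR(tsk))
                kthread_unpark(tsk);    /* leaves TASK_PARKED and runs */
        return tsk;
}
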
static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
         * We clear the IS_PARKED bit here as we don't wait
         * until the task has left the park code. So if we'd
         * park before that happens we'd see the IS_PARKED bit
         * which might be about to be cleared.
         */
        if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                        __kthread_bind(k, kthread->cpu, TASK_PARKED);
                wake_up_state(k, TASK_PARKED);
        }
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = to_live_kthread(k);

        if (kthread) {
                __kthread_unpark(k, kthread);
                put_task_stack(k);
        }
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = to_live_kthread(k);
        int ret = -ENOSYS;

        if (kthread) {
                if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                        if (k != current) {
                                wake_up_process(k);
                                wait_for_completion(&kthread->parked);
                        }
                }
                put_task_stack(k);
                ret = 0;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);
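
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * quiesce a thread around a reconfiguration; tsk is assumed to come from
 * kthread_create_on_cpu() above.
 */
static void example_quiesce(struct task_struct *tsk)
{
        if (!kthread_park(tsk)) {       /* 0 means the thread is parked */
                /* ... reconfigure while the thread cannot run ... */
                kthread_unpark(tsk);
        }
}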

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);

        get_task_struct(k);
        kthread = to_live_kthread(k);
        if (kthread) {
                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
                __kthread_unpark(k, kthread);
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
                put_task_stack(k);
        }
        ret = k->exit_code;
        put_task_struct(k);

        trace_sched_kthread_stop_ret(ret);
        return ret;
}
EXPORT_SYMBOL(kthread_stop);
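
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * orderly shutdown of a thread whose threadfn returns instead of calling
 * do_exit(); the name is hypothetical.
 */
static int example_shutdown(struct task_struct *tsk)
{
        /* Wakes the thread if needed and waits until it has exited. */
        return kthread_stop(tsk);       /* threadfn()'s value, or -EINTR */
}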

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;
        cgroup_init_kthreadd();

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);
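
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * callers normally initialize a worker through the init_kthread_worker()
 * wrapper from <linux/kthread.h>, which supplies the lockdep key; the
 * worker name below is hypothetical.
 */
static struct kthread_worker example_worker;

static void example_init_worker(void)
{
        init_kthread_worker(&example_worker);
}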

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker. The started kthread will process work_list until
 * it is stopped with kthread_stop(). A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time. A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        WARN_ON(worker->task);
        worker->task = current;
repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
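
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * attach a thread to the worker initialized above; kthread_run() creates
 * and immediately wakes it, and it then sits in kthread_worker_fn().
 */
static struct task_struct *example_start_worker(void)
{
        return kthread_run(kthread_worker_fn, &example_worker,
                           "example_worker");
}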

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
                                   struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);

        return !list_empty(&work->node) || work->canceling;
}

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        lockdep_assert_held(&worker->lock);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution. @worker
 * must have been initialized with init_kthread_worker(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                insert_kthread_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
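
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * define a work item and queue it on the worker above; example_work_fn
 * runs in the worker thread's context and the names are hypothetical.
 */
static void example_work_fn(struct kthread_work *work)
{
        /* ... one execution per successful queue_kthread_work() ... */
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static bool example_queue(void)
{
        return queue_kthread_work(&example_worker, &example_work);
}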

struct kthread_flush_work {
        struct kthread_work work;
        struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

retry:
        worker = work->worker;
        if (!worker)
                return;

        spin_lock_irq(&worker->lock);
        if (work->worker != worker) {
                spin_unlock_irq(&worker->lock);
                goto retry;
        }

        if (!list_empty(&work->node))
                insert_kthread_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                insert_kthread_work(worker, &fwork.work, worker->work_list.next);
        else
                noop = true;

        spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
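
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * wait for the work item queued above; safe whether or not it is
 * currently pending or running.
 */
static void example_wait_for_work(void)
{
        flush_kthread_work(&example_work);
}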

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *      %false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work,
                                  unsigned long *flags)
{
        /*
         * Try to remove the work from a worker list. It might either
         * be from worker->work_list or from worker->delayed_work_list.
         */
        if (!list_empty(&work->node)) {
                list_del_init(&work->node);
                return true;
        }

        return false;
}

static bool __kthread_cancel_work_sync(struct kthread_work *work)
{
        struct kthread_worker *worker = work->worker;
        unsigned long flags;
        int ret = false;

        if (!worker)
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        ret = __kthread_cancel_work(work, &flags);

        if (worker->current_work != work)
                goto out_fast;

        /*
         * The work is in progress and we need to wait with the lock released.
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
        spin_unlock_irqrestore(&worker->lock, flags);
        flush_kthread_work(work);
        spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;

out_fast:
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish. This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
        return __kthread_cancel_work_sync(work);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
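
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * cancel the work item before tearing down its container; afterwards it
 * is neither pending nor executing on any CPU.
 */
static void example_cancel_work(void)
{
        kthread_cancel_work_sync(&example_work);
}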

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        queue_kthread_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
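
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * drain the worker and stop its thread; after flush_kthread_worker()
 * every previously queued work has finished, and kthread_stop() then
 * makes kthread_worker_fn() return 0.
 */
static void example_stop_worker(struct task_struct *tsk)
{
        flush_kthread_worker(&example_worker);
        kthread_stop(tsk);
}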