async: use ULLONG_MAX for infinity cookie value
kernel/async.c
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time, by running
various independent hardware delay and discovery operations decoupled from
each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel. (An illustrative usage
sketch follows this comment block.)

*/

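/*
 * Illustrative usage sketch (not part of this file's build): how a driver
 * might combine the calls described above.  The my_* names and struct
 * my_device are hypothetical placeholders; async_schedule(),
 * async_synchronize_cookie() and async_synchronize_full() are the real
 * APIs implemented below.
 *
 *	static void my_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct my_device *dev = data;
 *
 *		my_slow_hardware_discovery(dev);   // may run out of order
 *
 *		// wait for everything scheduled before us, then do the
 *		// globally visible step so device numbers stay in order
 *		async_synchronize_cookie(cookie);
 *		my_register_device_number(dev);
 *	}
 *
 *	static int __init my_driver_init(void)
 *	{
 *		async_schedule(my_probe_one, &my_devices[0]);
 *		async_schedule(my_probe_one, &my_devices[1]);
 *
 *		// this init path shares state with non-async code, so do a
 *		// full flush before returning
 *		async_synchronize_full();
 *		return 0;
 *	}
 */
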
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_dfl_domain);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *domain)
{
	async_cookie_t first_running = ASYNC_COOKIE_MAX;
	async_cookie_t first_pending = ASYNC_COOKIE_MAX;
	struct async_entry *entry;

	/*
	 * Both running and pending lists are sorted but not disjoint.
	 * Take the first cookies from both and return the min.
	 */
	if (!list_empty(&domain->running)) {
		entry = list_first_entry(&domain->running, typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->domain == domain) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(domain);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * run a single async entry: move it to the running queue, execute it,
 * then remove and free it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *domain = entry->domain;

	/* 1) move self to the running queue, make sure it stays sorted */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, &domain->running, list)
		if (entry->cookie < pos->cookie)
			break;
	list_move_tail(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (domain->registered && --domain->count == 0)
		list_del_init(&domain->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (domain->registered && domain->count++ == 0)
		list_add_tail(&domain->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the domain to schedule the function in
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be passed to the async_synchronize_*_domain() functions to
 * wait within that synchronization domain rather than globally; see the
 * usage sketch below.  Note: This function may be called from atomic or
 * non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(ptr, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);

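/*
 * Illustrative usage sketch (not part of this file's build): confining
 * async work to a private synchronization domain so that waiting does not
 * stall on unrelated async activity.  "my_domain", my_async_probe() and
 * my_init() are hypothetical names; ASYNC_DOMAIN_EXCLUSIVE(),
 * async_schedule_domain() and async_synchronize_full_domain() are the real
 * APIs.
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(my_domain);
 *
 *	static void my_async_probe(void *data, async_cookie_t cookie)
 *	{
 *		// slow, independent probing work
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule_domain(my_async_probe, NULL, &my_domain);
 *
 *		// wait only for work scheduled in my_domain, not for the
 *		// default global domain
 *		async_synchronize_full_domain(&my_domain);
 *		return 0;
 *	}
 */
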
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->running));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!domain)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

f30d5b30
CH
347/**
348 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
349 * @cookie: async_cookie_t to use as checkpoint
350 *
351 * This function waits until all asynchronous function calls prior to @cookie
352 * have been done.
353 */
22a9d645
AV
354void async_synchronize_cookie(async_cookie_t cookie)
355{
8723d503 356 async_synchronize_cookie_domain(cookie, &async_dfl_domain);
22a9d645
AV
357}
358EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
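
/*
 * Illustrative usage sketch (not part of this file's build): a caller may
 * branch on current_is_async() to avoid waiting on async work while it is
 * itself running as an async worker, since async_synchronize_full() would
 * then wait for the caller's own entry and never return.  my_finish_setup()
 * is a hypothetical name.
 *
 *	static void my_finish_setup(void)
 *	{
 *		if (!current_is_async())
 *			async_synchronize_full();
 *	}
 */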