/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by running the various independent hardware delays and discovery
operations decoupled from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, an asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is required to maintain strict ordering between
the asynchronous and synchronous parts of the kernel.

*/
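
/*
 * Example (illustrative sketch; my_probe(), my_slow_hw_init() and
 * my_register_device() are hypothetical driver functions):
 *
 *	static void my_probe(void *data, async_cookie_t cookie)
 *	{
 *		// slow, order-independent work may run out of order
 *		my_slow_hw_init(data);
 *
 *		// wait for everything scheduled before us, then do the
 *		// externally visible part strictly in-order
 *		async_synchronize_cookie(cookie);
 *		my_register_device(data);
 *	}
 *
 *	// in the initialization path:
 *	async_schedule(my_probe, dev);
 */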

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *running)
{
	async_cookie_t first_running = next_cookie;	/* infinity value */
	async_cookie_t first_pending = next_cookie;	/* ditto */
	struct async_entry *entry;

	/*
	 * Both running and pending lists are sorted but not disjoint.
	 * Take the first cookies from both and return the min.
	 */
	if (!list_empty(&running->domain)) {
		entry = list_first_entry(&running->domain, typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->running == running) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

static async_cookie_t lowest_in_progress(struct async_domain *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *running = entry->running;

	/* 1) move self to the running queue, make sure it stays sorted */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, &running->domain, list)
		if (entry->cookie < pos->cookie)
			break;
	list_move_tail(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running may be used in the async_synchronize_*_domain() functions
 * to wait within a certain synchronization domain rather than globally.
 * The synchronization domain to use is specified via the running queue
 * @running.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
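
/*
 * Example (illustrative sketch; my_domain and my_probe_one() are
 * hypothetical): a subsystem that wants to wait only for its own
 * asynchronous work, not for every outstanding async call in the
 * system, can use a private synchronization domain:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_probe_one, dev1, &my_domain);
 *	async_schedule_domain(my_probe_one, dev2, &my_domain);
 *
 *	// waits for the two calls above only:
 *	async_synchronize_full_domain(&my_domain);
 */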

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
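
/*
 * Example of the init-path pattern described in the header comment
 * (my_init() and my_probe() are hypothetical): an init function that
 * schedules asynchronous probes but shares global resources with
 * synchronous code must drain the async queue before returning:
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(my_probe, NULL);
 *		// maintain strict ordering between the sync and async parts
 *		async_synchronize_full();
 *		return 0;
 *	}
 */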

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->domain));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
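
/*
 * Example (illustrative sketch; my_work() is hypothetical): a domain with
 * a lifetime shorter than the system's, e.g. one owned by a module.
 * ASYNC_DOMAIN_EXCLUSIVE() leaves the domain unregistered, so it is never
 * put on the async_domains list and async_synchronize_full() never waits
 * on it; a plain ASYNC_DOMAIN() would instead have to be idled and then
 * flushed with async_unregister_domain() before going away:
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(my_private_domain);
 *
 *	async_schedule_domain(my_work, NULL, &my_private_domain);
 *	async_synchronize_full_domain(&my_private_domain);
 */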

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
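
/*
 * Example (illustrative; the surrounding caller is hypothetical): code
 * that can run either synchronously or from an async worker can branch
 * on current_is_async() to avoid waiting on the async machinery from
 * inside it, which would deadlock on the worker's own entry:
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */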