/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time, by running
various independent hardware delays and discovery operations decoupled
from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
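
/*
 * A minimal sketch of the usage pattern described above.  All names here
 * (my_probe, my_driver_init, struct my_dev, the helpers) are hypothetical
 * and only illustrate the ordering contract, not an API of this file:
 *
 *	static void my_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		slow_hardware_discovery(dev);	// may run out of order
 *
 *		// Before the globally visible step, wait for everything
 *		// scheduled earlier so registration happens in-order.
 *		async_synchronize_cookie(cookie);
 *		register_my_device(dev);
 *	}
 *
 *	static int __init my_driver_init(void)
 *	{
 *		async_schedule(my_probe, &the_dev);
 *		return 0;
 *	}
 */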

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_dfl_domain);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

/*
 * Must be called with async_lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *domain)
{
	async_cookie_t first_running = ASYNC_COOKIE_MAX;
	async_cookie_t first_pending = ASYNC_COOKIE_MAX;
	struct async_entry *entry;

	/*
	 * Both the running and pending lists are sorted by cookie, but
	 * their cookie ranges may overlap.  Take the first cookie from
	 * each and return the min.
	 */
	if (!list_empty(&domain->running)) {
		entry = list_first_entry(&domain->running, typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->domain == domain) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(domain);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * Move this entry from the pending queue to the running queue and run it.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *domain = entry->domain;

	/* 1) move self to the running queue, make sure it stays sorted */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, &domain->running, list)
		if (entry->cookie < pos->cookie)
			break;
	list_move_tail(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (domain->registered && --domain->count == 0)
		list_del_init(&domain->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (domain->registered && domain->count++ == 0)
		list_add_tail(&domain->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);
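
/*
 * Illustration of cookie checkpointing; the worker functions (init_hw_a,
 * init_hw_b) and their data are hypothetical:
 *
 *	async_cookie_t c1, c2;
 *
 *	c1 = async_schedule(init_hw_a, &hw_a);
 *	c2 = async_schedule(init_hw_b, &hw_b);
 *
 *	// Wait until everything scheduled before c2 has completed, i.e.
 *	// init_hw_a and any earlier work; init_hw_b may still be running.
 *	async_synchronize_cookie(c2);
 */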

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be passed to the async_synchronize_*_domain() functions to
 * wait within that synchronization domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(ptr, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
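
/*
 * Sketch of scheduling into a private domain; my_domain, my_probe and
 * the_dev are hypothetical:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_probe, &the_dev, &my_domain);
 *
 *	// Wait only for work scheduled in my_domain; unrelated async work
 *	// in other domains is not waited on.
 *	async_synchronize_full_domain(&my_domain);
 */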

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
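
/*
 * Sketch of the init-function rule from the header comment; my_init,
 * my_probe and the_dev are hypothetical:
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(my_probe, &the_dev);
 *
 *		// This code shares global resources with drivers that do
 *		// not use async calls, so flush before returning.
 *		async_synchronize_full();
 *		return 0;
 *	}
 */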

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->running));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
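
/*
 * Sketch of a registered domain's teardown; my_domain and my_teardown
 * are hypothetical:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	void my_teardown(void)
 *	{
 *		// Drain our own work first: the domain must be idle ...
 *		async_synchronize_full_domain(&my_domain);
 *		// ... before dropping it from async_synchronize_full()'s view.
 *		async_unregister_domain(&my_domain);
 *	}
 *
 * A domain declared with ASYNC_DOMAIN_EXCLUSIVE() starts out unregistered,
 * is never waited on by async_synchronize_full(), and thus needs no
 * unregistration.
 */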

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!domain)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
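
/*
 * Illustrative use of current_is_async(); maybe_flush_async() is a
 * hypothetical helper.  Synchronizing from inside an async worker would
 * wait on the worker itself, so a helper callable from both contexts
 * might only flush when running in a regular task:
 *
 *	void maybe_flush_async(void)
 *	{
 *		if (!current_is_async())
 *			async_synchronize_full();
 *	}
 */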