Merge branch 'master' into for-3.9-async
kernel/async.c
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by
running various independent hardware delays and discovery operations
concurrently, rather than strictly serializing them.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before performing a globally visible operation, such as registering
device numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
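
/*
 * Example (illustrative sketch only, not part of this file): a
 * hypothetical driver schedules its probe asynchronously and uses its
 * own cookie to keep the externally visible step in schedule order.
 * Every name marked "hypothetical" below is an assumption made for the
 * sake of the example.
 */
#if 0
static void my_probe(void *data, async_cookie_t cookie)
{
        struct my_device *dev = data;           /* hypothetical type */

        my_slow_discovery(dev);                 /* hypothetical; safe to run out of order */

        /* wait for all async work scheduled before us to finish... */
        async_synchronize_cookie(cookie);

        /* ...so this globally visible step still happens in-order */
        my_register_device_numbers(dev);        /* hypothetical */
}

static int __init my_init(void)
{
        async_schedule(my_probe, &my_device_instance);  /* hypothetical data */

        /* we share global state with non-async code: drain everything */
        async_synchronize_full();
        return 0;
}
#endif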

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

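/* cookie handed to the next scheduled entry; bumped under async_lock */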
static async_cookie_t next_cookie = 1;

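/* cap on outstanding entries; beyond this, new work is run synchronously */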
#define MAX_WORK 32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
        struct list_head list;
        struct work_struct work;
        async_cookie_t cookie;
        async_func_ptr *func;
        void *data;
        struct async_domain *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * MUST be called with async_lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *running)
{
        async_cookie_t first_running = next_cookie;     /* infinity value */
        async_cookie_t first_pending = next_cookie;     /* ditto */
        struct async_entry *entry;

        /*
         * Both running and pending lists are sorted but not disjoint.
         * Take the first cookies from both and return the min.
         */
        if (!list_empty(&running->domain)) {
                entry = list_first_entry(&running->domain, typeof(*entry), list);
                first_running = entry->cookie;
        }

        list_for_each_entry(entry, &async_pending, list) {
                if (entry->running == running) {
                        first_pending = entry->cookie;
                        break;
                }
        }

        return min(first_running, first_pending);
}

static async_cookie_t lowest_in_progress(struct async_domain *running)
{
        unsigned long flags;
        async_cookie_t ret;

        spin_lock_irqsave(&async_lock, flags);
        ret = __lowest_in_progress(running);
        spin_unlock_irqrestore(&async_lock, flags);
        return ret;
}

/*
 * move our entry from the pending list to the running queue and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
        struct async_entry *entry =
                container_of(work, struct async_entry, work);
        struct async_entry *pos;
        unsigned long flags;
        ktime_t uninitialized_var(calltime), delta, rettime;
        struct async_domain *running = entry->running;

        /* 1) move self to the running queue, make sure it stays sorted */
        spin_lock_irqsave(&async_lock, flags);
        list_for_each_entry_reverse(pos, &running->domain, list)
                if (entry->cookie < pos->cookie)
                        break;
        list_move_tail(&entry->list, &pos->list);
        spin_unlock_irqrestore(&async_lock, flags);

        /* 2) run (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
                        (long long)entry->cookie,
                        entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }
        entry->func(entry->data, entry->cookie);
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
                        (long long)entry->cookie,
                        entry->func,
                        (long long)ktime_to_ns(delta) >> 10);
        }

        /* 3) remove self from the running queue */
        spin_lock_irqsave(&async_lock, flags);
        list_del(&entry->list);
        if (running->registered && --running->count == 0)
                list_del_init(&running->node);

        /* 4) free the entry */
        kfree(entry);
        atomic_dec(&entry_count);

        spin_unlock_irqrestore(&async_lock, flags);

        /* 5) wake up any waiters */
        wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
        struct async_entry *entry;
        unsigned long flags;
        async_cookie_t newcookie;

        /* allow irq-off callers */
        entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

        /*
         * If we're out of memory or if there's too much work
         * pending already, we execute synchronously.
         */
        if (!entry || atomic_read(&entry_count) > MAX_WORK) {
                kfree(entry);
                spin_lock_irqsave(&async_lock, flags);
                newcookie = next_cookie++;
                spin_unlock_irqrestore(&async_lock, flags);

                /* low on memory... run synchronously */
                ptr(data, newcookie);
                return newcookie;
        }
        INIT_WORK(&entry->work, async_run_entry_fn);
        entry->func = ptr;
        entry->data = data;
        entry->running = running;

        spin_lock_irqsave(&async_lock, flags);
        newcookie = entry->cookie = next_cookie++;
        list_add_tail(&entry->list, &async_pending);
        if (running->registered && running->count++ == 0)
                list_add_tail(&running->node, &async_domains);
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);

        /* mark that this task has queued an async job, used by module init */
        current->flags |= PF_USED_ASYNC;

        /* schedule for execution */
        queue_work(system_unbound_wq, &entry->work);

        return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
        return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running names the synchronization domain; it may be passed to the
 * async_synchronize_*_domain() functions to wait within that domain
 * rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
                                     struct async_domain *running)
{
        return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
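
/*
 * Example (illustrative sketch only, hypothetical names throughout):
 * confining a subsystem's async probes to a private domain, so the final
 * flush does not have to wait for unrelated async work elsewhere.
 */
#if 0
static ASYNC_DOMAIN(my_domain);

static int __init my_subsys_init(void)
{
        async_schedule_domain(my_probe, &my_dev_a, &my_domain); /* hypothetical */
        async_schedule_domain(my_probe, &my_dev_b, &my_domain);

        /* wait only for the work scheduled in my_domain */
        async_synchronize_full_domain(&my_domain);
        return 0;
}
#endif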

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
        mutex_lock(&async_register_mutex);
        do {
                struct async_domain *domain = NULL;

                spin_lock_irq(&async_lock);
                if (!list_empty(&async_domains))
                        domain = list_first_entry(&async_domains, typeof(*domain), node);
                spin_unlock_irq(&async_lock);

                async_synchronize_cookie_domain(next_cookie, domain);
        } while (!list_empty(&async_domains));
        mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
        mutex_lock(&async_register_mutex);
        spin_lock_irq(&async_lock);
        WARN_ON(!domain->registered || !list_empty(&domain->node) ||
                !list_empty(&domain->domain));
        domain->registered = 0;
        spin_unlock_irq(&async_lock);
        mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
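
/*
 * Example (illustrative sketch only, hypothetical names): a registered
 * domain must be drained before it is unregistered; the WARN_ON above
 * fires if the domain is still busy or still linked on async_domains.
 */
#if 0
static void my_teardown(void)
{
        /* drain our own work first, so the domain is idle */
        async_synchronize_full_domain(&my_domain);

        /* then drop it from async_synchronize_full()'s view */
        async_unregister_domain(&my_domain);
}
#endif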

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
        async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
        ktime_t uninitialized_var(starttime), delta, endtime;

        if (!running)
                return;

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
                starttime = ktime_get();
        }

        wait_event(async_done, lowest_in_progress(running) >= cookie);

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                endtime = ktime_get();
                delta = ktime_sub(endtime, starttime);

                printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
                        task_pid_nr(current),
                        (long long)ktime_to_ns(delta) >> 10);
        }
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
        async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
        struct worker *worker = current_wq_worker();

        return worker && worker->current_func == async_run_entry_fn;
}
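
/*
 * Example (illustrative sketch only, hypothetical name): code shared by
 * synchronous and asynchronous paths can use current_is_async() to avoid
 * waiting on the async machinery from inside an async worker, which
 * would include waiting on itself.
 */
#if 0
static void my_quiesce(void)
{
        /* only a non-async caller may safely drain all async work */
        if (!current_is_async())
                async_synchronize_full();
}
#endif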