/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running various independent hardware delay and discovery operations
decoupled from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
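/*

Illustration only: a minimal, hypothetical driver sketch (the foo_* names
are invented for this example and are not part of this file).  The async
callback does its slow, independent probing first, synchronizes on its own
cookie before the globally visible registration step, and the init routine
does a full synchronization before returning because it shares resources
with drivers that do not use async calls:

	static struct foo_dev foo_device;

	static void foo_probe_one(void *data, async_cookie_t cookie)
	{
		struct foo_dev *dev = data;

		foo_probe_hw(dev);
		async_synchronize_cookie(cookie);
		foo_register_device(dev);
	}

	static int __init foo_init(void)
	{
		async_schedule(foo_probe_one, &foo_device);
		async_synchronize_full();
		return 0;
	}

*/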
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"
static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_dfl_domain);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);
struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;
/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *domain)
{
	async_cookie_t first_running = ASYNC_COOKIE_MAX;
	async_cookie_t first_pending = ASYNC_COOKIE_MAX;
	struct async_entry *entry;

	/*
	 * Both running and pending lists are sorted but not disjoint.
	 * Take the first cookies from both and return the min.
	 */
	if (!list_empty(&domain->running)) {
		entry = list_first_entry(&domain->running,
					 typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->domain == domain) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(domain);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}
/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *domain = entry->domain;

	/* 1) move self to the running queue, make sure it stays sorted */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, &domain->running, list)
		if (entry->cookie < pos->cookie)
			break;
	list_move_tail(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (domain->registered && --domain->count == 0)
		list_del_init(&domain->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}
static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data,
				       struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (domain->registered && domain->count++ == 0)
		list_add_tail(&domain->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}
/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);
/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(ptr, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
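
/*
 * Illustration only: a hypothetical use of a private domain (the foo_*
 * names are made up and not part of this file).  Scheduling into a
 * separate domain lets a subsystem wait for just its own async work
 * instead of every outstanding async call:
 *
 *	static ASYNC_DOMAIN(foo_domain);
 *
 *	static void foo_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		foo_probe_hw(data);
 *	}
 *
 *	static int __init foo_init(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < FOO_NDEV; i++)
 *			async_schedule_domain(foo_probe_one, &foo_dev[i],
 *					      &foo_domain);
 *		async_synchronize_full_domain(&foo_domain);
 *		return 0;
 *	}
 */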
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains,
						  typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->running));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
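
/*
 * Illustration only (hypothetical "foo" names): a domain declared with
 * ASYNC_DOMAIN_EXCLUSIVE() is never visited by async_synchronize_full(),
 * so it needs no unregistration:
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(foo_domain);
 *
 * A plain ASYNC_DOMAIN() whose storage is about to disappear (e.g. on
 * module unload) should instead be drained and then unregistered:
 *
 *	async_synchronize_full_domain(&foo_domain);
 *	async_unregister_domain(&foo_domain);
 */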
/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!domain)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}