kernel/async.c
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by running independent hardware delay and discovery operations
concurrently instead of strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
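
/*
 * Example (hypothetical driver; struct my_dev, my_slow_init() and
 * my_register_devnum() are made-up names for illustration):
 *
 *	static void my_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_slow_init(dev);		// may run out of order
 *
 *		// wait for every probe scheduled before this one, so the
 *		// externally visible step below stays in order
 *		async_synchronize_cookie(cookie);
 *		my_register_devnum(dev);
 *	}
 *
 *	// in the (synchronous) init path:
 *	//	async_schedule(my_probe_one, dev);
 *	//	...
 *	//	async_synchronize_full();	// before returning from init
 */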

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static async_cookie_t next_cookie = 1;

#define MAX_WORK 32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head list;
	struct work_struct work;
	async_cookie_t cookie;
	async_func_ptr *func;
	void *data;
	struct async_domain *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *running)
{
	async_cookie_t first_running = next_cookie;	/* infinity value */
	async_cookie_t first_pending = next_cookie;	/* ditto */
	struct async_entry *entry;

	/*
	 * Entries are moved to the running list as their work items
	 * start executing, which is not guaranteed to happen in cookie
	 * order, so a pending entry may carry a lower cookie than the
	 * first running one.  Take the first cookie from each list and
	 * return the smaller.
	 */
	if (!list_empty(&running->domain)) {
		entry = list_first_entry(&running->domain,
					 typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->running == running) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

static async_cookie_t lowest_in_progress(struct async_domain *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * run the async entry embedded in this work item
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *running = entry->running;

	/* 1) move self to the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_move_tail(&entry->list, &running->domain);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* ">> 10" cheaply approximates a ns-to-usecs division */
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}
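
/*
 * Note on the fallback in __async_schedule() above: if the entry
 * allocation fails, or more than MAX_WORK entries are already pending,
 * the function is executed synchronously in the caller's context.
 * Scheduled functions therefore must not rely on actually running
 * asynchronously, e.g. they must not wait on locks that the scheduling
 * task may hold.
 */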

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running specifies the synchronization domain and may be passed to the
 * async_synchronize_*_domain() functions to wait within that domain
 * rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
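
/*
 * Example (hypothetical; my_domain and probe_port() are made-up names
 * for illustration): a private domain lets a subsystem wait for its own
 * async work without waiting on unrelated async jobs:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	for (i = 0; i < nr_ports; i++)
 *		async_schedule_domain(probe_port, &ports[i], &my_domain);
 *
 *	// waits only for the probe_port() calls scheduled above
 *	async_synchronize_full_domain(&my_domain);
 */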

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->domain));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
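
/*
 * Example (continuing the hypothetical my_domain above): a domain
 * declared with ASYNC_DOMAIN() is registered, so it must be drained and
 * then unregistered before it goes away, e.g. on module unload:
 *
 *	async_synchronize_full_domain(&my_domain);
 *	async_unregister_domain(&my_domain);
 *
 * Domains declared with ASYNC_DOMAIN_EXCLUSIVE() start out
 * unregistered, are skipped by async_synchronize_full(), and do not
 * need this call.
 */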

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);