/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/resume-trace.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/async.h>
30 #include <linux/suspend.h>
31 #include <trace/events/power.h>
32 #include <linux/cpufreq.h>
33 #include <linux/cpuidle.h>
34 #include <linux/timer.h>
35
36 #include "../base.h"
37 #include "power.h"
38
39 typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
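
/*
 * Illustrative sketch (not part of this file; "foo" names are hypothetical):
 * a driver opts into these lookups by filling in a struct dev_pm_ops, often
 * via the SET_SYSTEM_SLEEP_PM_OPS() helper. For the example below, pm_op()
 * would return foo_suspend for PM_EVENT_SUSPEND and foo_resume for
 * PM_EVENT_RESUME; pm_late_early_op() and pm_noirq_op() below select the
 * *_late/*_early and *_noirq variants the same way.
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *	};
 */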

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
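
/*
 * Usage sketch: device_resume() and __device_suspend() below bracket the
 * potentially hanging callback with the watchdog like this:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 *
 * If the callback fails to return within CONFIG_DPM_WATCHDOG_TIMEOUT
 * seconds, dpm_watchdog_handler() fires and panics the system.
 */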

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
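
/*
 * Note: pm_async_enabled is toggled from user space via /sys/power/pm_async,
 * and a device opts in with device_enable_async_suspend(), which sets
 * power.async_suspend and is visible per device as the power/async sysfs
 * attribute:
 *
 *	echo enabled > /sys/devices/.../power/async
 */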

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Schedule the async threads upfront, in case starting them is
	 * otherwise delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Schedule the async threads upfront, in case starting them is
	 * otherwise delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
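
/*
 * Note: dpm_resume_start() is the counterpart of dpm_suspend_end() below;
 * it re-runs the "noirq" and "early" phases that dpm_suspend_end()
 * suspended, and callers are expected to follow it with dpm_resume_end().
 */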

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

 Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

 Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string printed with debug and error messages.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
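
/*
 * Hedged sketch (hypothetical "foo" names, not part of this file): the
 * callbacks dispatched through legacy_suspend() use the old two-argument
 * signature, which is what distinguishes a legacy bus/class method from a
 * dev_pm_ops callback:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		...quiesce the device according to state.event...
 *		return 0;
 *	}
 */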

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	if (error)
		pm_runtime_put(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
			       dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
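
/*
 * Hedged usage sketch (names hypothetical): a driver whose device must wait
 * for another device's suspend/resume to finish could call this from its
 * own PM callback:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->peer);
 *	}
 */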

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
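
/*
 * Hedged usage sketch (names hypothetical): counting the devices currently
 * on dpm_list with the iterator above:
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *
 *	dpm_for_each_dev(&count, foo_count_dev);
 */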