/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are kept in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices. Protected by dpm_list_mtx.
 */
static bool transition_started;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road.
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

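/*
 * Illustrative sketch (not part of this file): device_pm_add() is reached
 * from the driver core at registration time, which is why dpm_list ends up
 * ordered parents-before-children:
 *
 *	device_register(dev)
 *	  device_add(dev)
 *	    device_pm_add(dev)	<-- dev appended to the tail of dpm_list
 */
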
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

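/*
 * Illustrative sketch (not part of this file): dpm_wait() only blocks on
 * devices taking part in asynchronous suspend/resume. A driver opts in via
 * device_enable_async_suspend(), typically at probe time; foo_probe() is
 * hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 */
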
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

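/*
 * Illustrative sketch (not part of this file): pm_op() dispatches on a
 * struct dev_pm_ops supplied by the device's bus, type or class. A minimal
 * table, assuming driver-provided foo_suspend() and foo_resume():
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *		.freeze		= foo_suspend,
 *		.thaw		= foo_resume,
 *		.poweroff	= foo_suspend,
 *		.restore	= foo_resume,
 *	};
 */
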
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
		       dev_name(dev), error,
		       (unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

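/*
 * Illustrative sketch (not part of this file): the _noirq callbacks that
 * pm_noirq_op() invokes run after interrupts have been disabled for the
 * driver, which makes them suitable for final register save/restore;
 * foo_suspend_noirq() and foo_resume_noirq() are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend_noirq	= foo_suspend_noirq,
 *		.resume_noirq	= foo_resume_noirq,
 *	};
 */
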
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

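/*
 * Note on the arithmetic above: do_div(n, base) divides the u64 @n in
 * place and returns the remainder, so after do_div(usecs64, NSEC_PER_USEC)
 * the variable holds microseconds. Equivalent sketch:
 *
 *	u64 t = ktime_to_ns(ktime_sub(ktime_get(), starttime));
 *	u32 rem = do_div(t, NSEC_PER_USEC);	<-- t is now in usecs
 */
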
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume_noirq(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

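/*
 * Illustrative sketch (not part of this file): dpm_suspend_noirq() and
 * dpm_resume_noirq() bracket the low-level platform sleep code, roughly as
 * the system suspend core does in suspend_enter():
 *
 *	error = dpm_suspend_noirq(PMSG_SUSPEND);
 *	if (!error) {
 *		... program the platform and enter the sleep state ...
 *		dpm_resume_noirq(PMSG_RESUME);
 *	}
 */
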
/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice_tail(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

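/*
 * Illustrative sketch (not part of this file): a bus that has not been
 * converted to dev_pm_ops still provides the two-argument callback that
 * legacy_suspend() wraps; foo_bus_suspend() is hypothetical:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;	<-- quiesce the device here
 *	}
 *
 *	static struct bus_type foo_bus_type = {
 *		.name		= "foo",
 *		.suspend	= foo_bus_suspend,
 *	};
 */
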
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		if (pm_wakeup_pending()) {
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

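/*
 * Illustrative sketch (not part of this file): a ->prepare() callback may
 * return -EAGAIN, which dpm_prepare() above treats as a transient condition
 * rather than a fatal error: the error is cleared and the loop continues
 * instead of aborting the whole transition. foo_is_busy() is hypothetical
 * driver logic:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return foo_is_busy(dev) ? -EAGAIN : 0;
 *	}
 */
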
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

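/*
 * Illustrative sketch (not part of this file): dpm_suspend_start() pairs
 * with dpm_resume_end() around the deeper suspend phases, roughly:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		... dpm_suspend_noirq(), enter the sleep state,
 *		... then dpm_resume_noirq() on the way back up
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 */
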
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
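
/*
 * Illustrative sketch (not part of this file): a driver whose suspend must
 * strictly follow that of another, non-ancestor device can enforce the
 * ordering explicitly; struct foo_priv and its ->companion are hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, priv->companion);
 *		if (error)
 *			return error;
 *		return 0;
 *	}
 */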