/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
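
/*
 * Illustrative usage (not part of this file): a driver that has enabled
 * autosuspend typically refreshes power.last_busy and then drops its usage
 * count, letting the expiration time computed above decide when the device
 * is actually suspended.  A minimal sketch, assuming the usual helpers from
 * include/linux/pm_runtime.h:
 *
 *	// after finishing a burst of I/O on the device:
 *	pm_runtime_mark_last_busy(dev);		// update power.last_busy
 *	pm_runtime_put_autosuspend(dev);	// may schedule an autosuspend
 */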
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (callback) {
		spin_unlock_irq(&dev->power.lock);

		callback(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.irq_safe) {
		retval = cb(dev);
	} else {
		spin_unlock_irq(&dev->power.lock);

		retval = cb(dev);

		spin_lock_irq(&dev->power.lock);
	}
	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = 0;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
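
/*
 * Illustrative usage (not part of this file): a driver that knows the device
 * will not be needed for a while can ask for a delayed suspend instead of an
 * immediate one (the 100 ms value is only an example):
 *
 *	pm_schedule_suspend(dev, 100);	// queue a suspend request in ~100 ms
 */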
/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
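
/*
 * Note (illustrative, not part of this file): drivers normally do not call
 * the three entry points above directly.  The static inline wrappers in
 * include/linux/pm_runtime.h pass the appropriate rpmflags for them, e.g.:
 *
 *	err = pm_runtime_get_sync(dev);	// __pm_runtime_resume(dev, RPM_GET_PUT)
 *	if (err < 0)
 *		return err;
 *	// ... access the hardware ...
 *	pm_runtime_put(dev);		// __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 */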
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
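
/*
 * Illustrative usage (not part of this file): code that powers a device up by
 * hand during probe would typically report that to the core before enabling
 * runtime PM, using the pm_runtime_set_active() wrapper:
 *
 *	pm_runtime_set_active(dev);	// __pm_runtime_set_status(dev, RPM_ACTIVE)
 *	pm_runtime_enable(dev);
 */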
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
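
/*
 * Illustrative usage (not part of this file): the driver core uses this
 * barrier before probing a device so that a pending asynchronous resume
 * cannot race with the probe, roughly:
 *
 *	pm_runtime_get_noresume(dev);
 *	pm_runtime_barrier(dev);
 *	// ... probe the device ...
 *	pm_runtime_put_sync(dev);
 */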
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time
	 * PM shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
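
/*
 * Note (illustrative, not part of this file): disable/enable calls only
 * adjust power.disable_depth, so they must stay balanced.  A typical driver
 * pairs them across probe and remove:
 *
 *	pm_runtime_enable(dev);		// in probe
 *	...
 *	pm_runtime_disable(dev);	// in remove; __pm_runtime_disable(dev, true)
 */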
/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
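
/*
 * Note (illustrative, not part of this file): pm_runtime_forbid() and
 * pm_runtime_allow() are what user space reaches through the device's
 * power/control sysfs attribute ("on" forbids, "auto" allows).  Kernel code
 * can use the same pair directly:
 *
 *	pm_runtime_forbid(dev);		// keep the device active
 *	...
 *	pm_runtime_allow(dev);		// permit runtime suspend again
 */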
/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
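
/*
 * Illustrative usage (not part of this file): a child device whose power
 * state is entirely determined by its parent can opt out of having callbacks
 * of its own, for example during its setup:
 *
 *	pm_runtime_no_callbacks(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */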
/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
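
/*
 * Illustrative usage (not part of this file): after pm_runtime_irq_safe() has
 * been called for a device, its runtime PM callbacks run with interrupts off,
 * so the device may be resumed from atomic context:
 *
 *	pm_runtime_irq_safe(dev);	// during probe
 *	...
 *	// later, possibly in interrupt context:
 *	pm_runtime_get_sync(dev);	// ->runtime_resume() runs with IRQs off
 *	// ... touch the hardware ...
 *	pm_runtime_put(dev);
 */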
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}
/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
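
/*
 * Illustrative usage (not part of this file): a driver opting in to
 * autosuspend typically does so during probe with the wrappers from
 * include/linux/pm_runtime.h (the 2000 ms delay is only an example):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);	// __pm_runtime_use_autosuspend(dev, true)
 *	pm_runtime_enable(dev);
 */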
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}