/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
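
/*
 * Illustrative sketch, not part of this file: a driver's ->runtime_suspend()
 * callback can use pm_runtime_autosuspend_expiration() to back out of a
 * suspend that races with fresh activity (a power.last_busy update made after
 * the suspend was started).  The foo_runtime_suspend() and foo_hw_power_down()
 * names below are hypothetical:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		if (pm_runtime_autosuspend_expiration(dev) != 0)
 *			return -EBUSY;
 *
 *		foo_hw_power_down(dev);
 *		return 0;
 *	}
 */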
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
		callback = dev->bus->pm->runtime_idle;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else
		callback = NULL;

	if (callback) {
		spin_unlock_irq(&dev->power.lock);

		callback(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.irq_safe) {
		retval = cb(dev);
	} else {
		spin_unlock_irq(&dev->power.lock);

		retval = cb(dev);

		spin_lock_irq(&dev->power.lock);
	}
	dev->power.runtime_error = retval;
	return retval;
}
/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
		callback = dev->bus->pm->runtime_suspend;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = 0;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
		callback = dev->bus->pm->runtime_resume;
	else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
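
/*
 * Illustrative sketch, not part of this file: instead of suspending
 * synchronously, a driver that has just finished a burst of I/O can ask for a
 * suspend request to be submitted half a second from now:
 *
 *	pm_schedule_suspend(dev, 500);
 *
 * If the device becomes busy again before the timer fires (for instance
 * because pm_runtime_get_sync() raised its usage count), the queued suspend
 * simply fails the rpm_check_suspend_allowed() test instead of powering the
 * device down.
 */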
/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
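
/*
 * Illustrative sketch, not part of this file: drivers normally reach the three
 * entry points above through the helpers in <linux/pm_runtime.h>;
 * pm_runtime_get_sync() maps to __pm_runtime_resume(dev, RPM_GET_PUT) and
 * pm_runtime_put() to __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC).  A
 * typical I/O path brackets hardware access with such a get/put pair; the
 * foo_do_io() name is hypothetical:
 *
 *	static int foo_do_io(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(dev);
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		... talk to the hardware ...
 *		pm_runtime_put(dev);
 *		return 0;
 *	}
 */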
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
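
/*
 * Illustrative sketch, not part of this file: __pm_runtime_set_status() is
 * normally reached through the pm_runtime_set_active() and
 * pm_runtime_set_suspended() helpers while run-time PM is still disabled, to
 * tell the core what state the hardware is really in before enabling it, e.g.
 * in a hypothetical probe routine:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */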
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
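
/*
 * Illustrative sketch, not part of this file: the driver core relies on
 * pm_runtime_barrier() before unbinding a driver, and the system sleep code
 * uses it to flush a possible asynchronous resume at the start of a
 * transition, roughly like this (device_prepare() in main.c of this kernel
 * generation):
 *
 *	pm_runtime_get_noresume(dev);
 *	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
 *		pm_wakeup_event(dev, 0);
 */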
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
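
/*
 * Illustrative sketch, not part of this file: pm_runtime_disable() (a wrapper
 * around __pm_runtime_disable(dev, true)) and pm_runtime_enable() nest, so a
 * driver can fence a critical section off from run-time PM activity:
 *
 *	pm_runtime_disable(dev);
 *	... reconfigure the hardware with the PM callbacks quiescent ...
 *	pm_runtime_enable(dev);
 */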
/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
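
/*
 * Note: pm_runtime_forbid() and pm_runtime_allow() back the per-device
 * "power/control" sysfs attribute; writing "on" forbids run-time suspend and
 * writing "auto" allows it again.  As a sketch (not part of this file), a
 * driver that wants its device kept resumed until user space explicitly opts
 * in can call, at probe time:
 *
 *	pm_runtime_forbid(dev);
 *	pm_runtime_enable(dev);
 */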
/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
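
/*
 * Illustrative sketch, not part of this file: pm_runtime_no_callbacks() is
 * meant for devices that are powered entirely through a power-managed parent,
 * such as a sub-function of a larger controller.  A hypothetical parent driver
 * registering such a child device "child" might do:
 *
 *	pm_runtime_no_callbacks(&child->dev);
 *	pm_runtime_set_active(&child->dev);
 *	pm_runtime_enable(&child->dev);
 */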
/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
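
/*
 * Illustrative sketch, not part of this file: a driver whose
 * ->runtime_suspend() and ->runtime_resume() callbacks never sleep can declare
 * that once at probe time:
 *
 *	pm_runtime_irq_safe(dev);
 *
 * after which synchronous calls such as pm_runtime_get_sync(dev) and
 * pm_runtime_suspend(dev) may be made in atomic context, because
 * rpm_callback() above then invokes the callbacks without dropping the
 * spinlock or re-enabling interrupts.
 */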
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}
/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
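
/*
 * Illustrative sketch, not part of this file: the usual way to opt in to the
 * autosuspend machinery is to set a delay and the use_autosuspend flag at
 * probe time, and to replace pm_runtime_put() with the mark-busy/autosuspend
 * pair in the I/O paths:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */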
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}