/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it.  Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_idle(dev);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);
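
/*
 * Usage sketch (illustrative only, assuming a caller that holds a valid
 * struct device *dev): after completing I/O, a driver or bus type lets the
 * core re-evaluate whether the device can be suspended:
 *
 *	pm_runtime_idle(dev);
 *
 * The call fails softly (-EAGAIN, -EBUSY, -EINPROGRESS) when the usage
 * count, an unsuspended child or a pending request blocks suspending.
 */
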
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type.  If another suspend has been started earlier, wait
 * for it to finish.  If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;
	dev->power.deferred_resume = false;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);

		if (retval == -EAGAIN || retval == -EBUSY) {
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_suspend(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);
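
/*
 * Usage sketch (illustrative only): a bus type that knows its device is idle
 * can suspend it synchronously; a positive return value means the device was
 * already suspended:
 *
 *	error = pm_runtime_suspend(dev);
 *	if (error < 0)
 *		dev_warn(dev, "runtime suspend failed: %d\n", error);
 */
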
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type.  If another resume has been started earlier, wait
 * for it to finish.  If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device.  Cancel any scheduled
 * or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_resume(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_idle(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	/* A timer_expires of zero means "inactive", so avoid it on wraparound. */
	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
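
/*
 * Usage sketch (illustrative only; the 500 ms grace period is an assumed
 * value, not from this file): instead of suspending immediately, a driver
 * may give the device a grace period in case more I/O arrives:
 *
 *	pm_schedule_suspend(dev, 500);
 *
 * A @delay of zero degenerates into __pm_request_suspend(), i.e. an
 * asynchronous suspend request without arming the timer.
 */
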
/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		dev->power.deferred_resume = true;
		return retval;
	}
	if (dev->power.request_pending) {
		/* If a non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	}
	if (retval)
		return retval;

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_resume(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);
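
/*
 * Usage sketch (illustrative only): from a context that must not sleep, such
 * as an interrupt handler, a driver queues the resume instead of calling the
 * synchronous pm_runtime_resume():
 *
 *	pm_request_resume(dev);
 *
 * The work item then runs __pm_runtime_resume() from pm_wq.
 */
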
/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and if it was zero previously,
 * resume it or submit a resume request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
	int retval = 1;

	if (atomic_add_return(1, &dev->power.usage_count) == 1)
		retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
	int retval = 0;

	if (atomic_dec_and_test(&dev->power.usage_count))
		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);
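
/*
 * Usage sketch (illustrative only, assuming the pm_runtime_get()/
 * pm_runtime_put() wrappers from <linux/pm_runtime.h>, which are defined in
 * terms of the two functions above): the usual pattern pairs a get with a
 * put around I/O:
 *
 *	pm_runtime_get(dev);	(resume request if the count was zero)
 *	...submit and complete I/O...
 *	pm_runtime_put(dev);	(idle request when the count drops to zero)
 */
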
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
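
/*
 * Usage sketch (illustrative only, assuming the pm_runtime_set_active()
 * wrapper from <linux/pm_runtime.h>, which calls the function above with
 * RPM_ACTIVE): a driver whose hardware is known to be powered up at probe
 * time tells the core so before enabling run-time PM:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */
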
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
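
/*
 * Usage sketch (illustrative only): before a driver tears down resources its
 * callbacks rely on, for example on remove, it makes sure no run-time PM
 * callback is still running or queued:
 *
 *	if (pm_runtime_barrier(dev))
 *		dev_dbg(dev, "device had to be woken up\n");
 */
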
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
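
/*
 * Usage sketch (illustrative only, assuming the pm_runtime_disable() wrapper
 * from <linux/pm_runtime.h>, which calls __pm_runtime_disable(dev, true)):
 * enable and disable calls must balance.  A typical driver enables run-time
 * PM late in probe and disables it early in remove:
 *
 *	probe:	pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *
 *	remove:	pm_runtime_disable(dev);
 */
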
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	spin_lock_init(&dev->power.lock);

	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}