PM / Runtime: Return special error code if runtime PM is disabled

/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;	/* Avoid 0; it means "already expired". */
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
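
/*
 * Worked example (illustrative numbers only, assuming HZ == 1000): with
 * autosuspend_delay == 2000 ms and power.last_busy == 10000, the code above
 * computes expires = 10000 + msecs_to_jiffies(2000) = 12000.  Because the
 * delay is >= 1000 ms, round_jiffies() may then move expires a little so the
 * timer fires on a whole-second boundary.  If fewer than 2000 ms have
 * elapsed since last_busy, the (possibly rounded, always nonzero) value is
 * returned; otherwise the delay has already expired and the result is 0.
 */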

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}
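
/*
 * For reference, rpm_check_suspend_allowed() returns: -EINVAL if a previous
 * callback failure left power.runtime_error set; -EACCES if run-time PM is
 * disabled (the "special error code" from the commit title, so callers can
 * tell "disabled" apart from transient conditions); -EAGAIN if the usage
 * count is nonzero or a resume request is pending; -EBUSY if the device has
 * unsuspended children; 1 if the device is already suspended; and 0 if a
 * suspend may proceed.
 */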

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (callback) {
		spin_unlock_irq(&dev->power.lock);

		callback(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.irq_safe) {
		retval = cb(dev);
	} else {
		spin_unlock_irq(&dev->power.lock);

		retval = cb(dev);

		spin_lock_irq(&dev->power.lock);
	}
	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
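
/*
 * Note on the -EACCES remapping above: rpm_check_suspend_allowed() and
 * rpm_resume() now return -EACCES to mean "run-time PM is disabled", so
 * rpm_callback() converts an -EACCES coming out of a driver callback into
 * -EIO, presumably to keep the two cases distinguishable for callers.
 */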

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if its run-time PM is enabled and it is
		 * not set to ignore children; otherwise this device can
		 * resume without touching the parent's state.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
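
/*
 * Example (a minimal sketch; the foo_* driver bits are hypothetical): a
 * driver that wants its device suspended half a second after the last I/O
 * completes might call, from its completion handler:
 *
 *	static void foo_request_done(struct foo_dev *foo)
 *	{
 *		pm_schedule_suspend(foo->dev, 500);
 *	}
 *
 * Each call cancels pending requests and rearms the timer, so the suspend
 * request is only queued after 500 ms of uninterrupted idleness.
 */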

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
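
/*
 * For reference, drivers rarely call the three __pm_runtime_* entry points
 * above directly.  The helpers in include/linux/pm_runtime.h wrap them,
 * roughly as follows (flag combinations as of this kernel version):
 *
 *	pm_runtime_idle(dev)		__pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)		__pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)		__pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_suspend(dev)		__pm_runtime_suspend(dev, 0)
 *	pm_runtime_autosuspend(dev)	__pm_runtime_suspend(dev, RPM_AUTO)
 *	pm_runtime_put_autosuspend(dev)	__pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 *	pm_runtime_resume(dev)		__pm_runtime_resume(dev, 0)
 *	pm_request_resume(dev)		__pm_runtime_resume(dev, RPM_ASYNC)
 *	pm_runtime_get(dev)		__pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_get_sync(dev)	__pm_runtime_resume(dev, RPM_GET_PUT)
 */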

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * nonzero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
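
/*
 * Typical use (a sketch; foo_probe() is hypothetical): a device that powers
 * up in the active state must have its status set to RPM_ACTIVE before
 * run-time PM is enabled, since pm_runtime_init() starts every device out
 * as RPM_SUSPENDED:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 *
 * pm_runtime_set_active() and pm_runtime_set_suspended() are the
 * include/linux/pm_runtime.h wrappers around __pm_runtime_set_status().
 */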

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
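
/*
 * A common pairing (sketch; the foo_* functions are hypothetical):
 * pm_runtime_enable() in probe and pm_runtime_disable() -- the
 * include/linux/pm_runtime.h wrapper for __pm_runtime_disable(dev, true) --
 * in remove, keeping disable_depth balanced:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct device *dev)
 *	{
 *		pm_runtime_disable(dev);
 *		return 0;
 *	}
 */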

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
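
/*
 * pm_runtime_forbid() and pm_runtime_allow() back the device's power/control
 * attribute in sysfs: writing "on" forbids run-time PM (the device is
 * resumed and kept active), writing "auto" allows it again.
 */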

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
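
/*
 * Example (sketch; foo_probe() is hypothetical): a driver whose
 * ->runtime_suspend() and ->runtime_resume() callbacks never sleep can
 * declare that once, at probe time, and may then invoke the synchronous
 * helpers from atomic context:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_irq_safe(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */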

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
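
/*
 * The usual autosuspend idiom (sketch; the foo_* pieces are hypothetical):
 * set up the delay once, then mark the device busy and drop the usage count
 * with the autosuspend variant after each burst of I/O:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 *
 *	static void foo_io_done(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 */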

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}