/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)			\
({									\
	type (*__routine)(struct device *__d);				\
	type __ret = (type)0;						\
									\
	__routine = genpd->dev_ops.callback;				\
	if (__routine) {						\
		__ret = __routine(dev);					\
	} else {							\
		__routine = dev_gpd_data(dev)->ops.callback;		\
		if (__routine)						\
			__ret = __routine(dev);				\
	}								\
	__ret;								\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);		\
	if (__elapsed > __gpd_data->td.field) {					\
		__gpd_data->td.field = __elapsed;				\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
	}									\
	__retval;								\
})
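
/*
 * Editorial note, not part of the original file: the two macros above
 * implement a two-level callback lookup.  GENPD_DEV_CALLBACK() tries the
 * domain-wide operation in genpd->dev_ops first and only falls back to the
 * per-device operation stored in dev_gpd_data(dev)->ops when the domain
 * provides none; GENPD_DEV_TIMED_CALLBACK() additionally times the call and
 * records a new worst-case latency in the device's timing data.  For a
 * "stop" callback the timed variant expands roughly to:
 *
 *	ktime_t __start = ktime_get();
 *	int __retval = GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 *	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));
 *	if (__elapsed > dev_gpd_data(dev)->td.stop_latency_ns)
 *		... update stop_latency_ns and warn ...
 */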

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);
		/*
		 * The "wait for master" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}
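
/*
 * Illustrative sketch, not part of the original file: platform code might
 * force a domain on before touching devices in it.  The domain and function
 * names here are hypothetical.
 */
#if 0
static struct generic_pm_domain my_pd;	/* hypothetical platform domain */

static int my_platform_prepare_access(void)
{
	int ret;

	/* Takes and releases my_pd.lock internally; may sleep. */
	ret = pm_genpd_poweron(&my_pd);
	if (ret)
		pr_err("my_pd: power-on failed: %d\n", ret);

	return ret;
}
#endif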

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;

	if (!gpd_data->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	gpd_data->need_restore = false;
}
/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd->power_off_time = ktime_get();

	/* Update PM QoS information for devices in the domain. */
	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		struct gpd_timing_data *td = &to_gpd_data(pdd)->td;

		pm_runtime_update_max_time_suspended(pdd->dev,
					td->start_latency_ns +
					td->restore_state_latency_ns +
					genpd->power_on_latency_ns);
	}

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
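
/*
 * Illustrative sketch, not part of the original file: a platform .power_off()
 * callback as consumed by the function above.  The helpers are hypothetical;
 * the point is that returning -EBUSY makes pm_genpd_poweroff() mark the
 * domain active again instead of treating the failure as fatal.
 */
#if 0
static int my_pd_power_off(struct generic_pm_domain *domain)
{
	if (my_soc_domain_busy(domain))		/* hypothetical helper */
		return -EBUSY;			/* domain will stay active */

	my_soc_domain_power_down(domain);	/* hypothetical register write */
	return 0;
}
#endif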

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	pm_runtime_update_max_time_suspended(dev,
				dev_gpd_data(dev)->td.start_latency_ns);

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		goto out;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

 out:
	genpd_start_dev(genpd, dev);

	return 0;
}
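
/*
 * Illustrative sketch, not part of the original file: a driver reaches the
 * two callbacks above through ordinary runtime PM calls and never calls into
 * the domain directly.
 */
#if 0
static int my_driver_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* may end up in pm_genpd_runtime_resume() */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware ... */

	pm_runtime_put(dev);		/* may lead to pm_genpd_runtime_suspend() */
	return 0;
}
#endif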

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
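
/*
 * Illustrative sketch, not part of the original file: platforms typically
 * call pm_genpd_poweroff_unused() once from a late initcall, after all
 * drivers have had a chance to claim their devices.
 */
#if 0
static int __init my_platform_pd_late_init(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(my_platform_pd_late_init);
#endif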

#else

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
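
/*
 * Editorial note, not part of the original file: the decision made by
 * resume_needed() above, tabulated:
 *
 *	device_may_wakeup()	active_wakeup	resume_needed()
 *	-----------------------------------------------------
 *	true			true		true  (must stay active)
 *	true			false		false
 *	false			true		false
 *	false			false		true  (wakeup setup may need
 *						       reconfiguring)
 *
 * Devices for which device_can_wakeup() is false never need to be resumed
 * here.
 */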

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Complete the freeze of a device by stopping it, under the assumption that
 * its pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into an arbitrary
		 * state, so make it appear as powered off to
		 * pm_genpd_poweron(), so that it tries to power it on in case
		 * it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	pm_genpd_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

#else

#define pm_genpd_prepare	NULL
#define pm_genpd_suspend	NULL
#define pm_genpd_suspend_late	NULL
#define pm_genpd_suspend_noirq	NULL
#define pm_genpd_resume_early	NULL
#define pm_genpd_resume_noirq	NULL
#define pm_genpd_resume		NULL
#define pm_genpd_freeze		NULL
#define pm_genpd_freeze_late	NULL
#define pm_genpd_freeze_noirq	NULL
#define pm_genpd_thaw_early	NULL
#define pm_genpd_thaw_noirq	NULL
#define pm_genpd_thaw		NULL
#define pm_genpd_restore_noirq	NULL
#define pm_genpd_complete	NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->status == GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto out;
	}

	genpd->device_count++;

	dev->pm_domain = &genpd->domain;
	dev_pm_get_subsys_data(dev);
	dev->power.subsys_data->domain_data = &gpd_data->base;
	gpd_data->base.dev = dev;
	gpd_data->need_restore = false;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	if (td)
		gpd_data->td = *td;

 out:
	genpd_release_lock(genpd);

	return ret;
}
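
/*
 * Illustrative sketch, not part of the original file: registering a device
 * with a domain together with per-device timing data.  The latency values
 * are hypothetical, and my_pd is the hypothetical domain from the earlier
 * sketches.
 */
#if 0
static struct gpd_timing_data my_td = {
	.stop_latency_ns = 20000,
	.start_latency_ns = 50000,
	.save_state_latency_ns = 100000,
	.restore_state_latency_ns = 200000,
};

static int my_board_add_device(struct device *dev)
{
	/* Fails with -EINVAL if my_pd is off or dev is already in it. */
	return __pm_genpd_add_device(&my_pd, dev, &my_td);
}
#endif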

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing the PM domain to which
 *	the device is to be added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
			     struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->of_node == genpd_node) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!genpd)
		return -EINVAL;

	return __pm_genpd_add_device(genpd, dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct pm_domain_data *pdd;
	int ret = -EINVAL;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		if (pdd->dev != dev)
			continue;

		list_del_init(&pdd->list_node);
		pdd->dev = NULL;
		dev_pm_put_subsys_data(dev);
		dev->pm_domain = NULL;
		kfree(to_gpd_data(pdd));

		genpd->device_count--;

		ret = 0;
		break;
	}

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}
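
/*
 * Illustrative sketch, not part of the original file: building a two-level
 * hierarchy in which a master domain must stay on while its subdomain is on.
 * Both domain objects are hypothetical.
 */
#if 0
static void my_board_link_domains(void)
{
	/* my_logic_pd becomes the master of my_peripheral_pd. */
	if (pm_genpd_add_subdomain(&my_logic_pd, &my_peripheral_pd))
		pr_err("failed to link power domains\n");
}
#endif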

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
			   struct gpd_timing_data *td)
{
	struct pm_domain_data *pdd;
	int ret = 0;

	if (!(dev && dev->power.subsys_data && ops))
		return -EINVAL;

	pm_runtime_disable(dev);
	device_pm_lock();

	pdd = dev->power.subsys_data->domain_data;
	if (pdd) {
		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);

		gpd_data->ops = *ops;
		if (td)
			gpd_data->td = *td;
	} else {
		ret = -EINVAL;
	}

	device_pm_unlock();
	pm_runtime_enable(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
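
/*
 * Illustrative sketch, not part of the original file: attaching per-device
 * callbacks.  The default dev_ops installed by pm_genpd_init() consult these
 * per-device operations before falling back to the driver's runtime PM
 * callbacks.  The helper functions are hypothetical.
 */
#if 0
static struct gpd_dev_ops my_dev_ops = {
	.save_state = my_dev_save_context,
	.restore_state = my_dev_restore_context,
};

static int my_board_wire_callbacks(struct device *dev)
{
	return pm_genpd_add_callbacks(dev, &my_dev_ops, NULL);
}
#endif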

/**
 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
 * @dev: Device to remove the callbacks from.
 * @clear_td: If set, clear the device's timing data too.
 */
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
	struct pm_domain_data *pdd;
	int ret = 0;

	if (!(dev && dev->power.subsys_data))
		return -EINVAL;

	pm_runtime_disable(dev);
	device_pm_lock();

	pdd = dev->power.subsys_data->domain_data;
	if (pdd) {
		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);

		gpd_data->ops = (struct gpd_dev_ops){ 0 };
		if (clear_td)
			gpd_data->td = (struct gpd_timing_data){ 0 };
	} else {
		ret = -EINVAL;
	}

	device_pm_unlock();
	pm_runtime_enable(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);
	struct device_driver *drv = dev->driver;

	cb = dev_gpd_data(dev)->ops.save_state;
	if (cb)
		return cb(dev);

	if (drv && drv->pm && drv->pm->runtime_suspend)
		return drv->pm->runtime_suspend(dev);

	return 0;
}

/**
 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);
	struct device_driver *drv = dev->driver;

	cb = dev_gpd_data(dev)->ops.restore_state;
	if (cb)
		return cb(dev);

	if (drv && drv->pm && drv->pm->runtime_resume)
		return drv->pm->runtime_resume(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;

	return cb ? cb(dev) : pm_generic_suspend(dev);
}

/**
 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;

	return cb ? cb(dev) : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;

	return cb ? cb(dev) : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_default_resume - Default "device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;

	return cb ? cb(dev) : pm_generic_resume(dev);
}

/**
 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;

	return cb ? cb(dev) : pm_generic_freeze(dev);
}

/**
 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;

	return cb ? cb(dev) : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;

	return cb ? cb(dev) : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;

	return cb ? cb(dev) : pm_generic_thaw(dev);
}

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_default_suspend	NULL
#define pm_genpd_default_suspend_late	NULL
#define pm_genpd_default_resume_early	NULL
#define pm_genpd_default_resume		NULL
#define pm_genpd_default_freeze		NULL
#define pm_genpd_default_freeze_late	NULL
#define pm_genpd_default_thaw_early	NULL
#define pm_genpd_default_thaw		NULL

#endif /* !CONFIG_PM_SLEEP */

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's status field (true means "off").
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	genpd->dev_ops.suspend = pm_genpd_default_suspend;
	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
	genpd->dev_ops.resume = pm_genpd_default_resume;
	genpd->dev_ops.freeze = pm_genpd_default_freeze;
	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
	genpd->dev_ops.thaw = pm_genpd_default_thaw;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}