PM / Domains: Add default power off governor function (v4)
[deliverable/linux.git] / drivers / base / power / domain.c
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/sched.h>
17#include <linux/suspend.h>
18#include <linux/export.h>
19
20#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
21({ \
22 type (*__routine)(struct device *__d); \
23 type __ret = (type)0; \
24 \
25 __routine = genpd->dev_ops.callback; \
26 if (__routine) { \
27 __ret = __routine(dev); \
28 } else { \
29 __routine = dev_gpd_data(dev)->ops.callback; \
30 if (__routine) \
31 __ret = __routine(dev); \
32 } \
33 __ret; \
34})
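/*
 * Illustration (not part of the original file): GENPD_DEV_CALLBACK() looks up
 * a callback at the domain level first (genpd->dev_ops) and falls back to the
 * per-device callback set (dev_gpd_data(dev)->ops) only if the domain does
 * not provide one.  genpd_stop_dev() below, for example, behaves roughly like:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *
 *	if (__routine)
 *		return __routine(dev);
 *	if (dev_gpd_data(dev)->ops.stop)
 *		return dev_gpd_data(dev)->ops.stop(dev);
 *	return 0;
 */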
35
36static LIST_HEAD(gpd_list);
37static DEFINE_MUTEX(gpd_list_lock);
38
39#ifdef CONFIG_PM
40
41struct generic_pm_domain *dev_to_genpd(struct device *dev)
42{
43 if (IS_ERR_OR_NULL(dev->pm_domain))
44 return ERR_PTR(-EINVAL);
45
46 return pd_to_genpd(dev->pm_domain);
47}
48
49static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
50{
51 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
52}
53
54static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
55{
56 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
57}
58
59static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
60{
61 return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
62}
63
64static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
65{
66 return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
67}
68
69static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
70{
71 bool ret = false;
72
73 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
74 ret = !!atomic_dec_and_test(&genpd->sd_count);
75
76 return ret;
77}
78
79static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
80{
81 atomic_inc(&genpd->sd_count);
82 smp_mb__after_atomic_inc();
83}
84
85static void genpd_acquire_lock(struct generic_pm_domain *genpd)
86{
87 DEFINE_WAIT(wait);
88
89 mutex_lock(&genpd->lock);
90 /*
91 * Wait for the domain to transition into either the active,
92 * or the power off state.
93 */
94 for (;;) {
95 prepare_to_wait(&genpd->status_wait_queue, &wait,
96 TASK_UNINTERRUPTIBLE);
97 if (genpd->status == GPD_STATE_ACTIVE
98 || genpd->status == GPD_STATE_POWER_OFF)
99 break;
100 mutex_unlock(&genpd->lock);
101
102 schedule();
103
104 mutex_lock(&genpd->lock);
105 }
106 finish_wait(&genpd->status_wait_queue, &wait);
107}
108
109static void genpd_release_lock(struct generic_pm_domain *genpd)
110{
111 mutex_unlock(&genpd->lock);
112}
113
114static void genpd_set_active(struct generic_pm_domain *genpd)
115{
116 if (genpd->resume_count == 0)
117 genpd->status = GPD_STATE_ACTIVE;
118}
119
120/**
121 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
122 * @genpd: PM domain to power up.
123 *
124 * Restore power to @genpd and all of its masters so that it is possible to
125 * resume a device belonging to it.
126 */
127int __pm_genpd_poweron(struct generic_pm_domain *genpd)
128 __releases(&genpd->lock) __acquires(&genpd->lock)
129{
130 struct gpd_link *link;
131 DEFINE_WAIT(wait);
132 int ret = 0;
133
134 /* If the domain's master is being waited for, we have to wait too. */
135 for (;;) {
136 prepare_to_wait(&genpd->status_wait_queue, &wait,
137 TASK_UNINTERRUPTIBLE);
138 if (genpd->status != GPD_STATE_WAIT_MASTER)
139 break;
140 mutex_unlock(&genpd->lock);
141
142 schedule();
143
144 mutex_lock(&genpd->lock);
145 }
146 finish_wait(&genpd->status_wait_queue, &wait);
147
148 if (genpd->status == GPD_STATE_ACTIVE
149 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
150 return 0;
151
152 if (genpd->status != GPD_STATE_POWER_OFF) {
153 genpd_set_active(genpd);
154 return 0;
155 }
156
157 /*
158 * The list is guaranteed not to change while the loop below is being
159 * executed, unless one of the masters' .power_on() callbacks fiddles
160 * with it.
161 */
162 list_for_each_entry(link, &genpd->slave_links, slave_node) {
163 genpd_sd_counter_inc(link->master);
164 genpd->status = GPD_STATE_WAIT_MASTER;
165
166 mutex_unlock(&genpd->lock);
167
168 ret = pm_genpd_poweron(link->master);
169
170 mutex_lock(&genpd->lock);
171
172 /*
173 * The "wait for master" status is guaranteed not to change
174 * while the master is powering on.
175 */
176 genpd->status = GPD_STATE_POWER_OFF;
177 wake_up_all(&genpd->status_wait_queue);
178 if (ret) {
179 genpd_sd_counter_dec(link->master);
180 goto err;
181 }
182 }
183
184 if (genpd->power_on) {
185 ret = genpd->power_on(genpd);
186 if (ret)
187 goto err;
188 }
189
190 genpd_set_active(genpd);
191
192 return 0;
193
194 err:
195 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
196 genpd_sd_counter_dec(link->master);
197
198 return ret;
199}
200
201/**
202 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
203 * @genpd: PM domain to power up.
204 */
205int pm_genpd_poweron(struct generic_pm_domain *genpd)
206{
207 int ret;
208
209 mutex_lock(&genpd->lock);
210 ret = __pm_genpd_poweron(genpd);
211 mutex_unlock(&genpd->lock);
212 return ret;
213}
214
215#endif /* CONFIG_PM */
216
217#ifdef CONFIG_PM_RUNTIME
218
219/**
220 * __pm_genpd_save_device - Save the pre-suspend state of a device.
221 * @pdd: Domain data of the device to save the state of.
222 * @genpd: PM domain the device belongs to.
223 */
224static int __pm_genpd_save_device(struct pm_domain_data *pdd,
225 struct generic_pm_domain *genpd)
226 __releases(&genpd->lock) __acquires(&genpd->lock)
227{
228 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
229 struct device *dev = pdd->dev;
230 int ret = 0;
231
232 if (gpd_data->need_restore)
233 return 0;
234
235 mutex_unlock(&genpd->lock);
236
237 genpd_start_dev(genpd, dev);
238 ret = genpd_save_dev(genpd, dev);
239 genpd_stop_dev(genpd, dev);
240
241 mutex_lock(&genpd->lock);
242
243 if (!ret)
244 gpd_data->need_restore = true;
245
246 return ret;
247}
248
249/**
250 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
251 * @pdd: Domain data of the device to restore the state of.
252 * @genpd: PM domain the device belongs to.
253 */
254static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
255 struct generic_pm_domain *genpd)
256 __releases(&genpd->lock) __acquires(&genpd->lock)
257{
258 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
259 struct device *dev = pdd->dev;
260
261 if (!gpd_data->need_restore)
262 return;
263
264 mutex_unlock(&genpd->lock);
265
266 genpd_start_dev(genpd, dev);
267 genpd_restore_dev(genpd, dev);
268 genpd_stop_dev(genpd, dev);
269
270 mutex_lock(&genpd->lock);
271
272 gpd_data->need_restore = false;
273}
274
275/**
276 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
277 * @genpd: PM domain to check.
278 *
279 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
280 * a "power off" operation, which means that a "power on" has occurred in the
281 * meantime, or if its resume_count field is different from zero, which means
282 * that one of its devices has been resumed in the meantime.
283 */
284static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
285{
286 return genpd->status == GPD_STATE_WAIT_MASTER
287 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
288}
289
290/**
291 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
292 * @genpd: PM domain to power off.
293 *
294 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
295 * before.
296 */
297void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
298{
299 if (!work_pending(&genpd->power_off_work))
300 queue_work(pm_wq, &genpd->power_off_work);
301}
302
303/**
304 * pm_genpd_poweroff - Remove power from a given PM domain.
305 * @genpd: PM domain to power down.
306 *
307 * If all of the @genpd's devices have been suspended and all of its subdomains
308 * have been powered down, run the runtime suspend callbacks provided by all of
309 * the @genpd's devices' drivers and remove power from @genpd.
310 */
311static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
312 __releases(&genpd->lock) __acquires(&genpd->lock)
313{
314 struct pm_domain_data *pdd;
315 struct gpd_link *link;
316 unsigned int not_suspended;
317 int ret = 0;
318
319 start:
320 /*
321 * Do not try to power off the domain in the following situations:
322 * (1) The domain is already in the "power off" state.
323 * (2) The domain is waiting for its master to power up.
324 * (3) One of the domain's devices is being resumed right now.
325 * (4) System suspend is in progress.
326 */
327 if (genpd->status == GPD_STATE_POWER_OFF
328 || genpd->status == GPD_STATE_WAIT_MASTER
329 || genpd->resume_count > 0 || genpd->prepared_count > 0)
330 return 0;
331
332 if (atomic_read(&genpd->sd_count) > 0)
333 return -EBUSY;
334
335 not_suspended = 0;
336 list_for_each_entry(pdd, &genpd->dev_list, list_node)
337 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
338 || pdd->dev->power.irq_safe))
339 not_suspended++;
340
341 if (not_suspended > genpd->in_progress)
342 return -EBUSY;
343
c6d22b37
RW
344 if (genpd->poweroff_task) {
345 /*
346 * Another instance of pm_genpd_poweroff() is executing
347 * callbacks, so tell it to start over and return.
348 */
349 genpd->status = GPD_STATE_REPEAT;
350 return 0;
351 }
352
f721889f
RW
353 if (genpd->gov && genpd->gov->power_down_ok) {
354 if (!genpd->gov->power_down_ok(&genpd->domain))
355 return -EAGAIN;
356 }
357
358 genpd->status = GPD_STATE_BUSY;
359 genpd->poweroff_task = current;
360
361 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
362 ret = atomic_read(&genpd->sd_count) == 0 ?
363 __pm_genpd_save_device(pdd, genpd) : -EBUSY;
364
365 if (genpd_abort_poweroff(genpd))
366 goto out;
367
368 if (ret) {
369 genpd_set_active(genpd);
370 goto out;
371 }
372
373 if (genpd->status == GPD_STATE_REPEAT) {
374 genpd->poweroff_task = NULL;
375 goto start;
376 }
377 }
378
379 if (genpd->power_off) {
380 if (atomic_read(&genpd->sd_count) > 0) {
381 ret = -EBUSY;
382 goto out;
383 }
384
385 /*
386 * If sd_count > 0 at this point, one of the subdomains hasn't
387 * managed to call pm_genpd_poweron() for the master yet after
388 * incrementing it. In that case pm_genpd_poweron() will wait
389 * for us to drop the lock, so we can call .power_off() and let
390 * the pm_genpd_poweron() restore power for us (this shouldn't
391 * happen very often).
392 */
393 ret = genpd->power_off(genpd);
394 if (ret == -EBUSY) {
395 genpd_set_active(genpd);
396 goto out;
397 }
398 }
399
400 genpd->status = GPD_STATE_POWER_OFF;
401 genpd->power_off_time = ktime_get();
402
403 /* Update PM QoS information for devices in the domain. */
404 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
405 struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
406
407 pm_runtime_update_max_time_suspended(pdd->dev,
408 td->start_latency_ns +
409 td->restore_state_latency_ns +
410 genpd->power_on_latency_ns);
411 }
412
413 list_for_each_entry(link, &genpd->slave_links, slave_node) {
414 genpd_sd_counter_dec(link->master);
415 genpd_queue_power_off_work(link->master);
416 }
417
418 out:
419 genpd->poweroff_task = NULL;
420 wake_up_all(&genpd->status_wait_queue);
421 return ret;
422}
423
424/**
425 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
426 * @work: Work structure used for scheduling the execution of this function.
427 */
428static void genpd_power_off_work_fn(struct work_struct *work)
429{
430 struct generic_pm_domain *genpd;
431
432 genpd = container_of(work, struct generic_pm_domain, power_off_work);
433
434 genpd_acquire_lock(genpd);
435 pm_genpd_poweroff(genpd);
436 genpd_release_lock(genpd);
437}
438
439/**
440 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
441 * @dev: Device to suspend.
442 *
443 * Carry out a runtime suspend of a device under the assumption that its
444 * pm_domain field points to the domain member of an object of type
445 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
446 */
447static int pm_genpd_runtime_suspend(struct device *dev)
448{
449 struct generic_pm_domain *genpd;
450 bool (*stop_ok)(struct device *__dev);
451 int ret;
452
453 dev_dbg(dev, "%s()\n", __func__);
454
455 genpd = dev_to_genpd(dev);
456 if (IS_ERR(genpd))
457 return -EINVAL;
458
459 might_sleep_if(!genpd->dev_irq_safe);
460
461 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
462 if (stop_ok && !stop_ok(dev))
463 return -EBUSY;
464
465 ret = genpd_stop_dev(genpd, dev);
466 if (ret)
467 return ret;
468
469 pm_runtime_update_max_time_suspended(dev,
470 dev_gpd_data(dev)->td.start_latency_ns);
471
472 /*
473 * If power.irq_safe is set, this routine will be run with interrupts
474 * off, so it can't use mutexes.
475 */
476 if (dev->power.irq_safe)
477 return 0;
478
479 mutex_lock(&genpd->lock);
480 genpd->in_progress++;
481 pm_genpd_poweroff(genpd);
482 genpd->in_progress--;
483 mutex_unlock(&genpd->lock);
484
485 return 0;
486}
487
488/**
489 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
490 * @dev: Device to resume.
491 *
492 * Carry out a runtime resume of a device under the assumption that its
493 * pm_domain field points to the domain member of an object of type
494 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
495 */
496static int pm_genpd_runtime_resume(struct device *dev)
497{
498 struct generic_pm_domain *genpd;
499 DEFINE_WAIT(wait);
500 int ret;
501
502 dev_dbg(dev, "%s()\n", __func__);
503
504 genpd = dev_to_genpd(dev);
505 if (IS_ERR(genpd))
506 return -EINVAL;
507
508 might_sleep_if(!genpd->dev_irq_safe);
509
510 /* If power.irq_safe, the PM domain is never powered off. */
511 if (dev->power.irq_safe)
512 goto out;
513
514 mutex_lock(&genpd->lock);
515 ret = __pm_genpd_poweron(genpd);
516 if (ret) {
517 mutex_unlock(&genpd->lock);
518 return ret;
519 }
520 genpd->status = GPD_STATE_BUSY;
521 genpd->resume_count++;
522 for (;;) {
523 prepare_to_wait(&genpd->status_wait_queue, &wait,
524 TASK_UNINTERRUPTIBLE);
525 /*
526 * If current is the powering off task, we have been called
527 * reentrantly from one of the device callbacks, so we should
528 * not wait.
529 */
530 if (!genpd->poweroff_task || genpd->poweroff_task == current)
531 break;
532 mutex_unlock(&genpd->lock);
533
534 schedule();
535
536 mutex_lock(&genpd->lock);
537 }
538 finish_wait(&genpd->status_wait_queue, &wait);
539 __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
540 genpd->resume_count--;
541 genpd_set_active(genpd);
542 wake_up_all(&genpd->status_wait_queue);
543 mutex_unlock(&genpd->lock);
544
545 out:
546 genpd_start_dev(genpd, dev);
547
548 return 0;
549}
550
551/**
552 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
553 */
554void pm_genpd_poweroff_unused(void)
555{
556 struct generic_pm_domain *genpd;
557
558 mutex_lock(&gpd_list_lock);
559
560 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
561 genpd_queue_power_off_work(genpd);
562
563 mutex_unlock(&gpd_list_lock);
564}
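/*
 * Illustrative usage sketch (not part of the original file): a platform would
 * typically call pm_genpd_poweroff_unused() once all of its domains and
 * devices have been registered, e.g. from a late_initcall, so that domains
 * left without active devices after boot get powered down.  The function
 * name below is hypothetical.
 */
#if 0	/* example only */
static int __init my_platform_pd_late_init(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(my_platform_pd_late_init);
#endif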
565
566#else
567
568static inline void genpd_power_off_work_fn(struct work_struct *work) {}
569
570#define pm_genpd_runtime_suspend NULL
571#define pm_genpd_runtime_resume NULL
572
573#endif /* CONFIG_PM_RUNTIME */
574
575#ifdef CONFIG_PM_SLEEP
576
577static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
578 struct device *dev)
579{
580 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
581}
582
583static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
584{
585 return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
586}
587
588static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
589{
590 return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
591}
592
593static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
594{
595 return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
596}
597
598static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
599{
600 return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
601}
602
603static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
604{
605 return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
606}
607
608static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
609{
610 return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
611}
612
613static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
614{
615 return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
616}
617
618static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
619{
620 return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
621}
622
623/**
624 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
625 * @genpd: PM domain to power off, if possible.
626 *
627 * Check if the given PM domain can be powered off (during system suspend or
628 * hibernation) and do that if so. Also, in that case propagate to its masters.
629 *
630 * This function is only called in "noirq" stages of system power transitions,
631 * so it need not acquire locks (all of the "noirq" callbacks are executed
632 * sequentially, so it is guaranteed that it will never run twice in parallel).
633 */
634static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
635{
636 struct gpd_link *link;
637
638 if (genpd->status == GPD_STATE_POWER_OFF)
639 return;
640
641 if (genpd->suspended_count != genpd->device_count
642 || atomic_read(&genpd->sd_count) > 0)
643 return;
644
645 if (genpd->power_off)
646 genpd->power_off(genpd);
647
648 genpd->status = GPD_STATE_POWER_OFF;
649
650 list_for_each_entry(link, &genpd->slave_links, slave_node) {
651 genpd_sd_counter_dec(link->master);
652 pm_genpd_sync_poweroff(link->master);
653 }
654}
655
656/**
657 * resume_needed - Check whether to resume a device before system suspend.
658 * @dev: Device to check.
659 * @genpd: PM domain the device belongs to.
660 *
661 * There are two cases in which a device that can wake up the system from sleep
662 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
663 * to wake up the system and it has to remain active for this purpose while the
664 * system is in the sleep state and (2) if the device is not enabled to wake up
665 * the system from sleep states and it generally doesn't generate wakeup signals
666 * by itself (those signals are generated on its behalf by other parts of the
667 * system). In the latter case it may be necessary to reconfigure the device's
668 * wakeup settings during system suspend, because it may have been set up to
669 * signal remote wakeup from the system's working state as needed by runtime PM.
670 * Return 'true' in either of the above cases.
671 */
672static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
673{
674 bool active_wakeup;
675
676 if (!device_can_wakeup(dev))
677 return false;
678
679 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
680 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
681}
682
683/**
684 * pm_genpd_prepare - Start power transition of a device in a PM domain.
685 * @dev: Device to start the transition of.
686 *
687 * Start a power transition of a device (during a system-wide power transition)
688 * under the assumption that its pm_domain field points to the domain member of
689 * an object of type struct generic_pm_domain representing a PM domain
690 * consisting of I/O devices.
691 */
692static int pm_genpd_prepare(struct device *dev)
693{
694 struct generic_pm_domain *genpd;
695 int ret;
696
697 dev_dbg(dev, "%s()\n", __func__);
698
699 genpd = dev_to_genpd(dev);
700 if (IS_ERR(genpd))
701 return -EINVAL;
702
703 /*
704 * If a wakeup request is pending for the device, it should be woken up
705 * at this point and a system wakeup event should be reported if it's
706 * set up to wake up the system from sleep states.
707 */
708 pm_runtime_get_noresume(dev);
709 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
710 pm_wakeup_event(dev, 0);
711
712 if (pm_wakeup_pending()) {
713 pm_runtime_put_sync(dev);
714 return -EBUSY;
715 }
716
717 if (resume_needed(dev, genpd))
718 pm_runtime_resume(dev);
719
720 genpd_acquire_lock(genpd);
721
722 if (genpd->prepared_count++ == 0)
723 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
724
725 genpd_release_lock(genpd);
726
727 if (genpd->suspend_power_off) {
728 pm_runtime_put_noidle(dev);
729 return 0;
730 }
731
732 /*
733 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
734 * so pm_genpd_poweron() will return immediately, but if the device
735 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
736 * to make it operational.
737 */
738 pm_runtime_resume(dev);
739 __pm_runtime_disable(dev, false);
740
741 ret = pm_generic_prepare(dev);
742 if (ret) {
743 mutex_lock(&genpd->lock);
744
745 if (--genpd->prepared_count == 0)
746 genpd->suspend_power_off = false;
747
748 mutex_unlock(&genpd->lock);
749 pm_runtime_enable(dev);
750 }
751
752 pm_runtime_put_sync(dev);
753 return ret;
754}
755
756/**
757 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
758 * @dev: Device to suspend.
759 *
760 * Suspend a device under the assumption that its pm_domain field points to the
761 * domain member of an object of type struct generic_pm_domain representing
762 * a PM domain consisting of I/O devices.
763 */
764static int pm_genpd_suspend(struct device *dev)
765{
766 struct generic_pm_domain *genpd;
767
768 dev_dbg(dev, "%s()\n", __func__);
769
770 genpd = dev_to_genpd(dev);
771 if (IS_ERR(genpd))
772 return -EINVAL;
773
774 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
775}
776
777/**
778 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
779 * @dev: Device to suspend.
780 *
781 * Carry out a late suspend of a device under the assumption that its
782 * pm_domain field points to the domain member of an object of type
783 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
784 */
785static int pm_genpd_suspend_noirq(struct device *dev)
786{
787 struct generic_pm_domain *genpd;
788 int ret;
789
790 dev_dbg(dev, "%s()\n", __func__);
791
792 genpd = dev_to_genpd(dev);
793 if (IS_ERR(genpd))
794 return -EINVAL;
795
796 if (genpd->suspend_power_off)
797 return 0;
798
799 ret = genpd_suspend_late(genpd, dev);
800 if (ret)
801 return ret;
802
803 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
804 return 0;
805
806 genpd_stop_dev(genpd, dev);
807
808 /*
809 * Since all of the "noirq" callbacks are executed sequentially, it is
810 * guaranteed that this function will never run twice in parallel for
811 * the same PM domain, so it is not necessary to use locking here.
812 */
813 genpd->suspended_count++;
814 pm_genpd_sync_poweroff(genpd);
815
816 return 0;
817}
818
819/**
820 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
821 * @dev: Device to resume.
822 *
823 * Carry out an early resume of a device under the assumption that its
824 * pm_domain field points to the domain member of an object of type
825 * struct generic_pm_domain representing a power domain consisting of I/O
826 * devices.
827 */
828static int pm_genpd_resume_noirq(struct device *dev)
829{
830 struct generic_pm_domain *genpd;
831
832 dev_dbg(dev, "%s()\n", __func__);
833
834 genpd = dev_to_genpd(dev);
835 if (IS_ERR(genpd))
836 return -EINVAL;
837
838 if (genpd->suspend_power_off)
839 return 0;
840
841 /*
842 * Since all of the "noirq" callbacks are executed sequentially, it is
843 * guaranteed that this function will never run twice in parallel for
844 * the same PM domain, so it is not necessary to use locking here.
845 */
846 pm_genpd_poweron(genpd);
847 genpd->suspended_count--;
848 genpd_start_dev(genpd, dev);
849
850 return genpd_resume_early(genpd, dev);
851}
852
853/**
854 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
855 * @dev: Device to resume.
856 *
857 * Resume a device under the assumption that its pm_domain field points to the
858 * domain member of an object of type struct generic_pm_domain representing
859 * a power domain consisting of I/O devices.
860 */
861static int pm_genpd_resume(struct device *dev)
862{
863 struct generic_pm_domain *genpd;
864
865 dev_dbg(dev, "%s()\n", __func__);
866
867 genpd = dev_to_genpd(dev);
868 if (IS_ERR(genpd))
869 return -EINVAL;
870
871 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
872}
873
874/**
875 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
876 * @dev: Device to freeze.
877 *
878 * Freeze a device under the assumption that its pm_domain field points to the
879 * domain member of an object of type struct generic_pm_domain representing
880 * a power domain consisting of I/O devices.
881 */
882static int pm_genpd_freeze(struct device *dev)
883{
884 struct generic_pm_domain *genpd;
885
886 dev_dbg(dev, "%s()\n", __func__);
887
888 genpd = dev_to_genpd(dev);
889 if (IS_ERR(genpd))
890 return -EINVAL;
891
892 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
893}
894
895/**
896 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
897 * @dev: Device to freeze.
898 *
899 * Carry out a late freeze of a device under the assumption that its
900 * pm_domain field points to the domain member of an object of type
901 * struct generic_pm_domain representing a power domain consisting of I/O
902 * devices.
903 */
904static int pm_genpd_freeze_noirq(struct device *dev)
905{
906 struct generic_pm_domain *genpd;
907 int ret;
908
909 dev_dbg(dev, "%s()\n", __func__);
910
911 genpd = dev_to_genpd(dev);
912 if (IS_ERR(genpd))
913 return -EINVAL;
914
915 if (genpd->suspend_power_off)
916 return 0;
917
918 ret = genpd_freeze_late(genpd, dev);
919 if (ret)
920 return ret;
921
922 genpd_stop_dev(genpd, dev);
923
924 return 0;
925}
926
927/**
928 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
929 * @dev: Device to thaw.
930 *
931 * Carry out an early thaw of a device under the assumption that its
932 * pm_domain field points to the domain member of an object of type
933 * struct generic_pm_domain representing a power domain consisting of I/O
934 * devices.
935 */
936static int pm_genpd_thaw_noirq(struct device *dev)
937{
938 struct generic_pm_domain *genpd;
939
940 dev_dbg(dev, "%s()\n", __func__);
941
942 genpd = dev_to_genpd(dev);
943 if (IS_ERR(genpd))
944 return -EINVAL;
945
946 if (genpd->suspend_power_off)
947 return 0;
948
949 genpd_start_dev(genpd, dev);
950
951 return genpd_thaw_early(genpd, dev);
952}
953
954/**
955 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
956 * @dev: Device to thaw.
957 *
958 * Thaw a device under the assumption that its pm_domain field points to the
959 * domain member of an object of type struct generic_pm_domain representing
960 * a power domain consisting of I/O devices.
961 */
962static int pm_genpd_thaw(struct device *dev)
963{
964 struct generic_pm_domain *genpd;
965
966 dev_dbg(dev, "%s()\n", __func__);
967
968 genpd = dev_to_genpd(dev);
969 if (IS_ERR(genpd))
970 return -EINVAL;
971
972 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
973}
974
975/**
976 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
977 * @dev: Device to resume.
978 *
979 * Carry out an early restore of a device under the assumption that its
980 * pm_domain field points to the domain member of an object of type
981 * struct generic_pm_domain representing a power domain consisting of I/O
982 * devices.
983 */
984static int pm_genpd_restore_noirq(struct device *dev)
985{
986 struct generic_pm_domain *genpd;
987
988 dev_dbg(dev, "%s()\n", __func__);
989
990 genpd = dev_to_genpd(dev);
991 if (IS_ERR(genpd))
992 return -EINVAL;
993
994 /*
995 * Since all of the "noirq" callbacks are executed sequentially, it is
996 * guaranteed that this function will never run twice in parallel for
997 * the same PM domain, so it is not necessary to use locking here.
998 */
999 genpd->status = GPD_STATE_POWER_OFF;
1000 if (genpd->suspend_power_off) {
1001 /*
1002 * The boot kernel might put the domain into the power on state,
1003 * so make sure it really is powered off.
1004 */
1005 if (genpd->power_off)
1006 genpd->power_off(genpd);
1007 return 0;
1008 }
1009
1010 pm_genpd_poweron(genpd);
1011 genpd->suspended_count--;
1012 genpd_start_dev(genpd, dev);
1013
1014 return genpd_resume_early(genpd, dev);
1015}
1016
1017/**
1018 * pm_genpd_complete - Complete power transition of a device in a power domain.
1019 * @dev: Device to complete the transition of.
1020 *
1021 * Complete a power transition of a device (during a system-wide power
1022 * transition) under the assumption that its pm_domain field points to the
1023 * domain member of an object of type struct generic_pm_domain representing
1024 * a power domain consisting of I/O devices.
1025 */
1026static void pm_genpd_complete(struct device *dev)
1027{
1028 struct generic_pm_domain *genpd;
1029 bool run_complete;
1030
1031 dev_dbg(dev, "%s()\n", __func__);
1032
1033 genpd = dev_to_genpd(dev);
1034 if (IS_ERR(genpd))
1035 return;
1036
1037 mutex_lock(&genpd->lock);
1038
1039 run_complete = !genpd->suspend_power_off;
1040 if (--genpd->prepared_count == 0)
1041 genpd->suspend_power_off = false;
1042
1043 mutex_unlock(&genpd->lock);
1044
1045 if (run_complete) {
1046 pm_generic_complete(dev);
1047 pm_runtime_set_active(dev);
1048 pm_runtime_enable(dev);
1049 pm_runtime_idle(dev);
1050 }
1051}
1052
1053#else
1054
1055#define pm_genpd_prepare NULL
1056#define pm_genpd_suspend NULL
1057#define pm_genpd_suspend_noirq NULL
1058#define pm_genpd_resume_noirq NULL
1059#define pm_genpd_resume NULL
1060#define pm_genpd_freeze NULL
1061#define pm_genpd_freeze_noirq NULL
1062#define pm_genpd_thaw_noirq NULL
1063#define pm_genpd_thaw NULL
1064#define pm_genpd_restore_noirq NULL
1065#define pm_genpd_complete NULL
1066
1067#endif /* CONFIG_PM_SLEEP */
1068
1069/**
1070 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1071 * @genpd: PM domain to add the device to.
1072 * @dev: Device to be added.
1073 * @td: Set of PM QoS timing parameters to attach to the device.
1074 */
1075int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1076 struct gpd_timing_data *td)
1077{
1078 struct generic_pm_domain_data *gpd_data;
1079 struct pm_domain_data *pdd;
1080 int ret = 0;
1081
1082 dev_dbg(dev, "%s()\n", __func__);
1083
1084 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1085 return -EINVAL;
1086
1087 genpd_acquire_lock(genpd);
1088
1089 if (genpd->status == GPD_STATE_POWER_OFF) {
1090 ret = -EINVAL;
1091 goto out;
1092 }
1093
1094 if (genpd->prepared_count > 0) {
1095 ret = -EAGAIN;
1096 goto out;
1097 }
1098
1099 list_for_each_entry(pdd, &genpd->dev_list, list_node)
1100 if (pdd->dev == dev) {
1101 ret = -EINVAL;
1102 goto out;
1103 }
1104
1105 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1106 if (!gpd_data) {
1107 ret = -ENOMEM;
1108 goto out;
1109 }
1110
1111 genpd->device_count++;
1112
1113 dev->pm_domain = &genpd->domain;
1114 dev_pm_get_subsys_data(dev);
1115 dev->power.subsys_data->domain_data = &gpd_data->base;
1116 gpd_data->base.dev = dev;
1117 gpd_data->need_restore = false;
1118 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1119 if (td)
1120 gpd_data->td = *td;
1121
1122 out:
1123 genpd_release_lock(genpd);
1124
1125 return ret;
1126}
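/*
 * Illustrative usage sketch (not part of the original file): platform code
 * attaches a device to a domain, optionally passing PM QoS timing data (a
 * NULL @td is accepted).  The helper name and latency figures below are
 * hypothetical; only fields referenced elsewhere in this file are shown.
 */
#if 0	/* example only */
static struct gpd_timing_data my_dev_td = {
	.start_latency_ns = 50000,
	.restore_state_latency_ns = 200000,
};

static int __init my_platform_attach_dev(struct generic_pm_domain *genpd,
					 struct device *dev)
{
	return __pm_genpd_add_device(genpd, dev, &my_dev_td);
}
#endif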
1127
1128/**
1129 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1130 * @genpd: PM domain to remove the device from.
1131 * @dev: Device to be removed.
1132 */
1133int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1134 struct device *dev)
1135{
1136 struct pm_domain_data *pdd;
1137 int ret = -EINVAL;
1138
1139 dev_dbg(dev, "%s()\n", __func__);
1140
1141 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1142 return -EINVAL;
1143
1144 genpd_acquire_lock(genpd);
1145
1146 if (genpd->prepared_count > 0) {
1147 ret = -EAGAIN;
1148 goto out;
1149 }
1150
1151 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
1152 if (pdd->dev != dev)
1153 continue;
1154
1155 list_del_init(&pdd->list_node);
1156 pdd->dev = NULL;
1157 dev_pm_put_subsys_data(dev);
1158 dev->pm_domain = NULL;
1159 kfree(to_gpd_data(pdd));
1160
1161 genpd->device_count--;
1162
1163 ret = 0;
1164 break;
1165 }
1166
1167 out:
1168 genpd_release_lock(genpd);
1169
1170 return ret;
1171}
1172
1173/**
1174 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1175 * @genpd: Master PM domain to add the subdomain to.
1176 * @subdomain: Subdomain to be added.
1177 */
1178int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1179 struct generic_pm_domain *subdomain)
1180{
1181 struct gpd_link *link;
1182 int ret = 0;
1183
1184 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1185 return -EINVAL;
1186
1187 start:
1188 genpd_acquire_lock(genpd);
1189 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1190
1191 if (subdomain->status != GPD_STATE_POWER_OFF
1192 && subdomain->status != GPD_STATE_ACTIVE) {
1193 mutex_unlock(&subdomain->lock);
1194 genpd_release_lock(genpd);
1195 goto start;
1196 }
1197
1198 if (genpd->status == GPD_STATE_POWER_OFF
1199 && subdomain->status != GPD_STATE_POWER_OFF) {
1200 ret = -EINVAL;
1201 goto out;
1202 }
1203
1204 list_for_each_entry(link, &genpd->slave_links, slave_node) {
1205 if (link->slave == subdomain && link->master == genpd) {
1206 ret = -EINVAL;
1207 goto out;
1208 }
1209 }
1210
1211 link = kzalloc(sizeof(*link), GFP_KERNEL);
1212 if (!link) {
1213 ret = -ENOMEM;
1214 goto out;
1215 }
1216 link->master = genpd;
1217 list_add_tail(&link->master_node, &genpd->master_links);
1218 link->slave = subdomain;
1219 list_add_tail(&link->slave_node, &subdomain->slave_links);
1220 if (subdomain->status != GPD_STATE_POWER_OFF)
1221 genpd_sd_counter_inc(genpd);
1222
1223 out:
1224 mutex_unlock(&subdomain->lock);
1225 genpd_release_lock(genpd);
1226
1227 return ret;
1228}
1229
1230/**
1231 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1232 * @genpd: Master PM domain to remove the subdomain from.
1233 * @subdomain: Subdomain to be removed.
1234 */
1235int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1236 struct generic_pm_domain *subdomain)
1237{
1238 struct gpd_link *link;
1239 int ret = -EINVAL;
1240
1241 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1242 return -EINVAL;
1243
1244 start:
1245 genpd_acquire_lock(genpd);
1246
1247 list_for_each_entry(link, &genpd->master_links, master_node) {
1248 if (link->slave != subdomain)
1249 continue;
1250
1251 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1252
1253 if (subdomain->status != GPD_STATE_POWER_OFF
1254 && subdomain->status != GPD_STATE_ACTIVE) {
1255 mutex_unlock(&subdomain->lock);
1256 genpd_release_lock(genpd);
1257 goto start;
1258 }
1259
1260 list_del(&link->master_node);
1261 list_del(&link->slave_node);
1262 kfree(link);
1263 if (subdomain->status != GPD_STATE_POWER_OFF)
1264 genpd_sd_counter_dec(genpd);
1265
1266 mutex_unlock(&subdomain->lock);
1267
1268 ret = 0;
1269 break;
1270 }
1271
1272 genpd_release_lock(genpd);
1273
1274 return ret;
1275}
1276
1277/**
1278 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1279 * @dev: Device to add the callbacks to.
1280 * @ops: Set of callbacks to add.
1281 * @td: Timing data to add to the device along with the callbacks (optional).
1282 */
1283int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1284 struct gpd_timing_data *td)
1285{
1286 struct pm_domain_data *pdd;
1287 int ret = 0;
1288
1289 if (!(dev && dev->power.subsys_data && ops))
1290 return -EINVAL;
1291
1292 pm_runtime_disable(dev);
1293 device_pm_lock();
1294
1295 pdd = dev->power.subsys_data->domain_data;
1296 if (pdd) {
1297 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1298
1299 gpd_data->ops = *ops;
1300 if (td)
1301 gpd_data->td = *td;
1302 } else {
1303 ret = -EINVAL;
1304 }
1305
1306 device_pm_unlock();
1307 pm_runtime_enable(dev);
1308
1309 return ret;
1310}
1311EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1312
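/*
 * Illustrative usage sketch (not part of the original file): a driver or
 * platform glue can attach per-device callbacks with pm_genpd_add_callbacks().
 * The default domain-level handlers installed by pm_genpd_init() dispatch to
 * these callbacks when they are set and fall back to the driver's dev_pm_ops
 * otherwise.  All names below are hypothetical.
 */
#if 0	/* example only */
static int my_dev_save_state(struct device *dev)
{
	/* Save device registers before the domain is powered off. */
	return 0;
}

static int my_dev_restore_state(struct device *dev)
{
	/* Reprogram device registers after the domain is powered on. */
	return 0;
}

static struct gpd_dev_ops my_dev_gpd_ops = {
	.save_state = my_dev_save_state,
	.restore_state = my_dev_restore_state,
};

/* @dev must already belong to a generic PM domain. */
static int my_register_gpd_callbacks(struct device *dev)
{
	return pm_genpd_add_callbacks(dev, &my_dev_gpd_ops, NULL);
}
#endif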
1313/**
1314 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1315 * @dev: Device to remove the callbacks from.
1316 * @clear_td: If set, clear the device's timing data too.
1317 */
1318int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1319{
1320 struct pm_domain_data *pdd;
1321 int ret = 0;
1322
1323 if (!(dev && dev->power.subsys_data))
1324 return -EINVAL;
1325
1326 pm_runtime_disable(dev);
1327 device_pm_lock();
1328
1329 pdd = dev->power.subsys_data->domain_data;
1330 if (pdd) {
1331 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1332
1333 gpd_data->ops = (struct gpd_dev_ops){ 0 };
1334 if (clear_td)
1335 gpd_data->td = (struct gpd_timing_data){ 0 };
1336 } else {
1337 ret = -EINVAL;
1338 }
1339
1340 device_pm_unlock();
1341 pm_runtime_enable(dev);
1342
1343 return ret;
1344}
1345EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1346
1347/* Default device callbacks for generic PM domains. */
1348
1349/**
1350 * pm_genpd_default_save_state - Default "save device state" for PM domains.
1351 * @dev: Device to handle.
1352 */
1353static int pm_genpd_default_save_state(struct device *dev)
1354{
1355 int (*cb)(struct device *__dev);
1356 struct device_driver *drv = dev->driver;
1357
1358 cb = dev_gpd_data(dev)->ops.save_state;
1359 if (cb)
1360 return cb(dev);
1361
1362 if (drv && drv->pm && drv->pm->runtime_suspend)
1363 return drv->pm->runtime_suspend(dev);
1364
1365 return 0;
1366}
1367
1368/**
1369 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1370 * @dev: Device to handle.
1371 */
1372static int pm_genpd_default_restore_state(struct device *dev)
1373{
1374 int (*cb)(struct device *__dev);
1375 struct device_driver *drv = dev->driver;
1376
1377 cb = dev_gpd_data(dev)->ops.restore_state;
1378 if (cb)
1379 return cb(dev);
1380
1381 if (drv && drv->pm && drv->pm->runtime_resume)
1382 return drv->pm->runtime_resume(dev);
1383
1384 return 0;
1385}
1386
1387/**
1388 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
1389 * @dev: Device to handle.
1390 */
1391static int pm_genpd_default_suspend(struct device *dev)
1392{
1393 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1394
1395 return cb ? cb(dev) : pm_generic_suspend(dev);
1396}
1397
1398/**
1399 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
1400 * @dev: Device to handle.
1401 */
1402static int pm_genpd_default_suspend_late(struct device *dev)
1403{
1404 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1405
1406 return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
1407}
1408
1409/**
1410 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
1411 * @dev: Device to handle.
1412 */
1413static int pm_genpd_default_resume_early(struct device *dev)
1414{
1415 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1416
1417 return cb ? cb(dev) : pm_generic_resume_noirq(dev);
1418}
1419
1420/**
1421 * pm_genpd_default_resume - Default "device resume" for PM domains.
1422 * @dev: Device to handle.
1423 */
1424static int pm_genpd_default_resume(struct device *dev)
1425{
1426 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1427
1428 return cb ? cb(dev) : pm_generic_resume(dev);
1429}
1430
1431/**
1432 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
1433 * @dev: Device to handle.
1434 */
1435static int pm_genpd_default_freeze(struct device *dev)
1436{
1437 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1438
1439 return cb ? cb(dev) : pm_generic_freeze(dev);
1440}
1441
1442/**
1443 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
1444 * @dev: Device to handle.
1445 */
1446static int pm_genpd_default_freeze_late(struct device *dev)
1447{
1448 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1449
1450 return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
1451}
1452
1453/**
1454 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
1455 * @dev: Device to handle.
1456 */
1457static int pm_genpd_default_thaw_early(struct device *dev)
1458{
1459 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1460
1461 return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
1462}
1463
1464/**
1465 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
1466 * @dev: Device to handle.
1467 */
1468static int pm_genpd_default_thaw(struct device *dev)
1469{
1470 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1471
1472 return cb ? cb(dev) : pm_generic_thaw(dev);
1473}
1474
1475/**
1476 * pm_genpd_init - Initialize a generic I/O PM domain object.
1477 * @genpd: PM domain object to initialize.
1478 * @gov: PM domain governor to associate with the domain (may be NULL).
1479 * @is_off: Initial value of the domain's power_is_off field.
1480 */
1481void pm_genpd_init(struct generic_pm_domain *genpd,
1482 struct dev_power_governor *gov, bool is_off)
1483{
1484 if (IS_ERR_OR_NULL(genpd))
1485 return;
1486
1487 INIT_LIST_HEAD(&genpd->master_links);
1488 INIT_LIST_HEAD(&genpd->slave_links);
1489 INIT_LIST_HEAD(&genpd->dev_list);
1490 mutex_init(&genpd->lock);
1491 genpd->gov = gov;
1492 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1493 genpd->in_progress = 0;
1494 atomic_set(&genpd->sd_count, 0);
1495 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1496 init_waitqueue_head(&genpd->status_wait_queue);
1497 genpd->poweroff_task = NULL;
1498 genpd->resume_count = 0;
1499 genpd->device_count = 0;
1500 genpd->suspended_count = 0;
1501 genpd->max_off_time_ns = -1;
1502 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1503 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1504 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1505 genpd->domain.ops.prepare = pm_genpd_prepare;
1506 genpd->domain.ops.suspend = pm_genpd_suspend;
1507 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1508 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1509 genpd->domain.ops.resume = pm_genpd_resume;
1510 genpd->domain.ops.freeze = pm_genpd_freeze;
1511 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1512 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1513 genpd->domain.ops.thaw = pm_genpd_thaw;
1514 genpd->domain.ops.poweroff = pm_genpd_suspend;
1515 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1516 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1517 genpd->domain.ops.restore = pm_genpd_resume;
1518 genpd->domain.ops.complete = pm_genpd_complete;
1519 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1520 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1521 genpd->dev_ops.suspend = pm_genpd_default_suspend;
1522 genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
1523 genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
1524 genpd->dev_ops.resume = pm_genpd_default_resume;
1525 genpd->dev_ops.freeze = pm_genpd_default_freeze;
1526 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
1527 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
1528 genpd->dev_ops.thaw = pm_genpd_default_thaw;
1529 mutex_lock(&gpd_list_lock);
1530 list_add(&genpd->gpd_list_node, &gpd_list);
1531 mutex_unlock(&gpd_list_lock);
1532}
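/*
 * Illustrative usage sketch (not part of the original file): a platform
 * defines a domain with .power_on()/.power_off() callbacks, initializes it
 * with pm_genpd_init(), and then registers devices and subdomains.  All
 * names and callbacks below are hypothetical.
 */
#if 0	/* example only */
static int my_pd_power_off(struct generic_pm_domain *genpd)
{
	/* Cut power to the hardware island backing this domain. */
	return 0;
}

static int my_pd_power_on(struct generic_pm_domain *genpd)
{
	/* Restore power to the hardware island backing this domain. */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.power_off = my_pd_power_off,
	.power_on = my_pd_power_on,
};

static void __init my_platform_init_pm_domains(struct device *iodev,
					       struct generic_pm_domain *sub)
{
	/* Start powered off; no governor (gov == NULL). */
	pm_genpd_init(&my_pd, NULL, true);
	__pm_genpd_add_device(&my_pd, iodev, NULL);
	pm_genpd_add_subdomain(&my_pd, sub);
}
#endif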