PM / Domains: Rework system suspend callback routines (v2)
drivers/base/power/domain.c
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/sched.h>
17#include <linux/suspend.h>
18#include <linux/export.h>
19
20#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
21({ \
22 type (*__routine)(struct device *__d); \
23 type __ret = (type)0; \
24 \
25 __routine = genpd->dev_ops.callback; \
26 if (__routine) { \
27 __ret = __routine(dev); \
28 } else { \
29 __routine = dev_gpd_data(dev)->ops.callback; \
30 if (__routine) \
31 __ret = __routine(dev); \
32 } \
33 __ret; \
34})
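/*
 * For illustration only: GENPD_DEV_CALLBACK() looks the callback up in the
 * domain-wide dev_ops first and only falls back to the per-device operations
 * installed with pm_genpd_add_callbacks().  genpd_stop_dev() below therefore
 * behaves roughly like this hand-expanded sketch:
 *
 *	int (*stop)(struct device *) = genpd->dev_ops.stop;
 *
 *	if (!stop)
 *		stop = dev_gpd_data(dev)->ops.stop;
 *	return stop ? stop(dev) : 0;
 */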
 35
36static LIST_HEAD(gpd_list);
37static DEFINE_MUTEX(gpd_list_lock);
38
39#ifdef CONFIG_PM
40
41static struct generic_pm_domain *dev_to_genpd(struct device *dev)
42{
43 if (IS_ERR_OR_NULL(dev->pm_domain))
44 return ERR_PTR(-EINVAL);
45
 46 return pd_to_genpd(dev->pm_domain);
 47}
 48
49static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
50{
51 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
52}
53
54static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
55{
56 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
57}
58
59static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
60{
61 return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
62}
63
64static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
65{
66 return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
67}
68
 69static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 70{
71 bool ret = false;
72
73 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
74 ret = !!atomic_dec_and_test(&genpd->sd_count);
75
76 return ret;
77}
78
79static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
80{
81 atomic_inc(&genpd->sd_count);
82 smp_mb__after_atomic_inc();
83}
84
85static void genpd_acquire_lock(struct generic_pm_domain *genpd)
86{
87 DEFINE_WAIT(wait);
88
89 mutex_lock(&genpd->lock);
90 /*
91 * Wait for the domain to transition into either the active,
92 * or the power off state.
93 */
94 for (;;) {
95 prepare_to_wait(&genpd->status_wait_queue, &wait,
96 TASK_UNINTERRUPTIBLE);
97 if (genpd->status == GPD_STATE_ACTIVE
98 || genpd->status == GPD_STATE_POWER_OFF)
99 break;
100 mutex_unlock(&genpd->lock);
101
102 schedule();
103
104 mutex_lock(&genpd->lock);
105 }
106 finish_wait(&genpd->status_wait_queue, &wait);
107}
108
109static void genpd_release_lock(struct generic_pm_domain *genpd)
110{
111 mutex_unlock(&genpd->lock);
112}
113
114static void genpd_set_active(struct generic_pm_domain *genpd)
115{
116 if (genpd->resume_count == 0)
117 genpd->status = GPD_STATE_ACTIVE;
118}
119
 120/**
 121 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
122 * @genpd: PM domain to power up.
123 *
 124 * Restore power to @genpd and all of its masters so that it is possible to
125 * resume a device belonging to it.
126 */
127int __pm_genpd_poweron(struct generic_pm_domain *genpd)
128 __releases(&genpd->lock) __acquires(&genpd->lock)
 129{
 130 struct gpd_link *link;
 131 DEFINE_WAIT(wait);
132 int ret = 0;
133
 134 /* If the domain's master is being waited for, we have to wait too. */
135 for (;;) {
136 prepare_to_wait(&genpd->status_wait_queue, &wait,
137 TASK_UNINTERRUPTIBLE);
 138 if (genpd->status != GPD_STATE_WAIT_MASTER)
139 break;
140 mutex_unlock(&genpd->lock);
 141
142 schedule();
143
144 mutex_lock(&genpd->lock);
145 }
146 finish_wait(&genpd->status_wait_queue, &wait);
 147
 148 if (genpd->status == GPD_STATE_ACTIVE
 149 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
 150 return 0;
 151
152 if (genpd->status != GPD_STATE_POWER_OFF) {
153 genpd_set_active(genpd);
 154 return 0;
155 }
156
157 /*
158 * The list is guaranteed not to change while the loop below is being
159 * executed, unless one of the masters' .power_on() callbacks fiddles
160 * with it.
161 */
162 list_for_each_entry(link, &genpd->slave_links, slave_node) {
163 genpd_sd_counter_inc(link->master);
 164 genpd->status = GPD_STATE_WAIT_MASTER;
 165
 166 mutex_unlock(&genpd->lock);
 167
 168 ret = pm_genpd_poweron(link->master);
169
170 mutex_lock(&genpd->lock);
171
172 /*
 173 * The "wait for master" status is guaranteed not to change
 174 * while the master is powering on.
175 */
176 genpd->status = GPD_STATE_POWER_OFF;
177 wake_up_all(&genpd->status_wait_queue);
178 if (ret) {
179 genpd_sd_counter_dec(link->master);
 180 goto err;
 181 }
182 }
183
 184 if (genpd->power_on) {
 185 ret = genpd->power_on(genpd);
186 if (ret)
187 goto err;
 188 }
 189
190 genpd_set_active(genpd);
191
 192 return 0;
193
194 err:
195 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
196 genpd_sd_counter_dec(link->master);
 197
198 return ret;
199}
200
201/**
 202 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
203 * @genpd: PM domain to power up.
204 */
205int pm_genpd_poweron(struct generic_pm_domain *genpd)
206{
207 int ret;
208
209 mutex_lock(&genpd->lock);
210 ret = __pm_genpd_poweron(genpd);
211 mutex_unlock(&genpd->lock);
212 return ret;
213}
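/*
 * Minimal usage sketch (hypothetical names, not part of this file): platform
 * code that owns a domain can force it on before touching hardware inside it,
 * where "my_pd" is assumed to be a platform-defined generic_pm_domain that
 * was registered with pm_genpd_init():
 *
 *	int ret = pm_genpd_poweron(&my_pd);
 *	if (ret)
 *		pr_err("failed to power on PM domain: %d\n", ret);
 *
 * The call powers up all masters of the domain first and returns 0 once the
 * domain ends up in the active state.
 */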
214
215#endif /* CONFIG_PM */
216
217#ifdef CONFIG_PM_RUNTIME
218
219/**
220 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 221 * @pdd: Domain data of the device to save the state of.
222 * @genpd: PM domain the device belongs to.
223 */
 224static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 225 struct generic_pm_domain *genpd)
 226 __releases(&genpd->lock) __acquires(&genpd->lock)
 227{
 228 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 229 struct device *dev = pdd->dev;
230 int ret = 0;
231
 232 if (gpd_data->need_restore)
233 return 0;
234
235 mutex_unlock(&genpd->lock);
236
237 genpd_start_dev(genpd, dev);
238 ret = genpd_save_dev(genpd, dev);
239 genpd_stop_dev(genpd, dev);
 240
241 mutex_lock(&genpd->lock);
242
f721889f 243 if (!ret)
 244 gpd_data->need_restore = true;
245
246 return ret;
247}
248
249/**
250 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 251 * @pdd: Domain data of the device to restore the state of.
252 * @genpd: PM domain the device belongs to.
253 */
 254static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 255 struct generic_pm_domain *genpd)
 256 __releases(&genpd->lock) __acquires(&genpd->lock)
 257{
 258 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 259 struct device *dev = pdd->dev;
 260
 261 if (!gpd_data->need_restore)
262 return;
263
264 mutex_unlock(&genpd->lock);
265
266 genpd_start_dev(genpd, dev);
267 genpd_restore_dev(genpd, dev);
268 genpd_stop_dev(genpd, dev);
 269
270 mutex_lock(&genpd->lock);
271
 272 gpd_data->need_restore = false;
273}
274
275/**
276 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
277 * @genpd: PM domain to check.
278 *
279 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 280 * a "power off" operation, which means that a "power on" has occurred in the
281 * meantime, or if its resume_count field is different from zero, which means
282 * that one of its devices has been resumed in the meantime.
283 */
284static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
285{
 286 return genpd->status == GPD_STATE_WAIT_MASTER
 287 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
288}
289
290/**
291 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 292 * @genpd: PM domain to power off.
293 *
294 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
295 * before.
296 */
 297void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
298{
299 if (!work_pending(&genpd->power_off_work))
300 queue_work(pm_wq, &genpd->power_off_work);
301}
302
303/**
304 * pm_genpd_poweroff - Remove power from a given PM domain.
305 * @genpd: PM domain to power down.
306 *
307 * If all of the @genpd's devices have been suspended and all of its subdomains
308 * have been powered down, run the runtime suspend callbacks provided by all of
309 * the @genpd's devices' drivers and remove power from @genpd.
310 */
311static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 312 __releases(&genpd->lock) __acquires(&genpd->lock)
 313{
 314 struct pm_domain_data *pdd;
 315 struct gpd_link *link;
 316 unsigned int not_suspended;
 317 int ret = 0;
 318
319 start:
320 /*
321 * Do not try to power off the domain in the following situations:
322 * (1) The domain is already in the "power off" state.
 323 * (2) The domain is waiting for its master to power up.
 324 * (3) One of the domain's devices is being resumed right now.
 325 * (4) System suspend is in progress.
 326 */
 327 if (genpd->status == GPD_STATE_POWER_OFF
 328 || genpd->status == GPD_STATE_WAIT_MASTER
 329 || genpd->resume_count > 0 || genpd->prepared_count > 0)
330 return 0;
331
 332 if (atomic_read(&genpd->sd_count) > 0)
333 return -EBUSY;
334
335 not_suspended = 0;
 336 list_for_each_entry(pdd, &genpd->dev_list, list_node)
337 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
338 || pdd->dev->power.irq_safe))
339 not_suspended++;
340
341 if (not_suspended > genpd->in_progress)
342 return -EBUSY;
343
344 if (genpd->poweroff_task) {
345 /*
346 * Another instance of pm_genpd_poweroff() is executing
347 * callbacks, so tell it to start over and return.
348 */
349 genpd->status = GPD_STATE_REPEAT;
350 return 0;
351 }
352
353 if (genpd->gov && genpd->gov->power_down_ok) {
354 if (!genpd->gov->power_down_ok(&genpd->domain))
355 return -EAGAIN;
356 }
357
 358 genpd->status = GPD_STATE_BUSY;
 359 genpd->poweroff_task = current;
 360
 361 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
 362 ret = atomic_read(&genpd->sd_count) == 0 ?
 363 __pm_genpd_save_device(pdd, genpd) : -EBUSY;
364
365 if (genpd_abort_poweroff(genpd))
366 goto out;
367
368 if (ret) {
369 genpd_set_active(genpd);
370 goto out;
371 }
 372
373 if (genpd->status == GPD_STATE_REPEAT) {
374 genpd->poweroff_task = NULL;
375 goto start;
376 }
377 }
 378
379 if (genpd->power_off) {
380 if (atomic_read(&genpd->sd_count) > 0) {
381 ret = -EBUSY;
382 goto out;
383 }
 384
 385 /*
386 * If sd_count > 0 at this point, one of the subdomains hasn't
387 * managed to call pm_genpd_poweron() for the master yet after
388 * incrementing it. In that case pm_genpd_poweron() will wait
389 * for us to drop the lock, so we can call .power_off() and let
390 * the pm_genpd_poweron() restore power for us (this shouldn't
391 * happen very often).
392 */
393 ret = genpd->power_off(genpd);
394 if (ret == -EBUSY) {
395 genpd_set_active(genpd);
396 goto out;
397 }
398 }
 399
 400 genpd->status = GPD_STATE_POWER_OFF;
 401
402 list_for_each_entry(link, &genpd->slave_links, slave_node) {
403 genpd_sd_counter_dec(link->master);
404 genpd_queue_power_off_work(link->master);
405 }
 406
407 out:
408 genpd->poweroff_task = NULL;
409 wake_up_all(&genpd->status_wait_queue);
410 return ret;
411}
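/*
 * Note on the domain states used by the runtime PM path in this file:
 * GPD_STATE_ACTIVE and GPD_STATE_POWER_OFF are the two stable states,
 * GPD_STATE_WAIT_MASTER means the domain is waiting for one of its masters
 * to finish powering up, GPD_STATE_BUSY means pm_genpd_poweroff() is running
 * device callbacks, and GPD_STATE_REPEAT tells the running instance of
 * pm_genpd_poweroff() to start over because another power-off request came
 * in while it was executing.  Waiters synchronize on genpd->status_wait_queue.
 */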
412
413/**
414 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
415 * @work: Work structure used for scheduling the execution of this function.
416 */
417static void genpd_power_off_work_fn(struct work_struct *work)
418{
419 struct generic_pm_domain *genpd;
420
421 genpd = container_of(work, struct generic_pm_domain, power_off_work);
422
 423 genpd_acquire_lock(genpd);
 424 pm_genpd_poweroff(genpd);
 425 genpd_release_lock(genpd);
426}
427
428/**
429 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
430 * @dev: Device to suspend.
431 *
432 * Carry out a runtime suspend of a device under the assumption that its
433 * pm_domain field points to the domain member of an object of type
434 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
435 */
436static int pm_genpd_runtime_suspend(struct device *dev)
437{
438 struct generic_pm_domain *genpd;
 439 int ret;
440
441 dev_dbg(dev, "%s()\n", __func__);
442
443 genpd = dev_to_genpd(dev);
444 if (IS_ERR(genpd))
445 return -EINVAL;
446
447 might_sleep_if(!genpd->dev_irq_safe);
448
449 ret = genpd_stop_dev(genpd, dev);
450 if (ret)
451 return ret;
 452
453 /*
454 * If power.irq_safe is set, this routine will be run with interrupts
455 * off, so it can't use mutexes.
456 */
457 if (dev->power.irq_safe)
458 return 0;
459
 460 mutex_lock(&genpd->lock);
461 genpd->in_progress++;
462 pm_genpd_poweroff(genpd);
463 genpd->in_progress--;
 464 mutex_unlock(&genpd->lock);
465
466 return 0;
467}
468
469/**
470 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
471 * @dev: Device to resume.
472 *
473 * Carry out a runtime resume of a device under the assumption that its
474 * pm_domain field points to the domain member of an object of type
475 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
476 */
477static int pm_genpd_runtime_resume(struct device *dev)
478{
479 struct generic_pm_domain *genpd;
 480 DEFINE_WAIT(wait);
481 int ret;
482
483 dev_dbg(dev, "%s()\n", __func__);
484
485 genpd = dev_to_genpd(dev);
486 if (IS_ERR(genpd))
487 return -EINVAL;
488
489 might_sleep_if(!genpd->dev_irq_safe);
490
491 /* If power.irq_safe, the PM domain is never powered off. */
492 if (dev->power.irq_safe)
493 goto out;
494
 495 mutex_lock(&genpd->lock);
496 ret = __pm_genpd_poweron(genpd);
497 if (ret) {
498 mutex_unlock(&genpd->lock);
499 return ret;
500 }
 501 genpd->status = GPD_STATE_BUSY;
502 genpd->resume_count++;
503 for (;;) {
504 prepare_to_wait(&genpd->status_wait_queue, &wait,
505 TASK_UNINTERRUPTIBLE);
506 /*
507 * If current is the powering off task, we have been called
508 * reentrantly from one of the device callbacks, so we should
509 * not wait.
510 */
511 if (!genpd->poweroff_task || genpd->poweroff_task == current)
512 break;
513 mutex_unlock(&genpd->lock);
514
515 schedule();
516
517 mutex_lock(&genpd->lock);
518 }
519 finish_wait(&genpd->status_wait_queue, &wait);
 520 __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
521 genpd->resume_count--;
522 genpd_set_active(genpd);
 523 wake_up_all(&genpd->status_wait_queue);
 524 mutex_unlock(&genpd->lock);
 525
 526 out:
 527 genpd_start_dev(genpd, dev);
528
529 return 0;
530}
531
532/**
533 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
534 */
535void pm_genpd_poweroff_unused(void)
536{
537 struct generic_pm_domain *genpd;
538
539 mutex_lock(&gpd_list_lock);
540
541 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
542 genpd_queue_power_off_work(genpd);
543
544 mutex_unlock(&gpd_list_lock);
545}
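/*
 * Illustrative sketch (hypothetical function name, not part of this file):
 * platform code typically calls pm_genpd_poweroff_unused() once, after all
 * domains and devices have been registered, so that domains with no devices
 * in use get queued for power-off:
 *
 *	static int __init my_platform_pd_late_init(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(my_platform_pd_late_init);
 */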
546
547#else
548
549static inline void genpd_power_off_work_fn(struct work_struct *work) {}
550
551#define pm_genpd_runtime_suspend NULL
552#define pm_genpd_runtime_resume NULL
553
554#endif /* CONFIG_PM_RUNTIME */
555
556#ifdef CONFIG_PM_SLEEP
557
558static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
559 struct device *dev)
560{
561 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
562}
563
564static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
565{
566 return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
567}
568
569static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
570{
571 return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
572}
573
574static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
575{
576 return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
577}
578
579static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
580{
581 return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
582}
583
584static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
585{
586 return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
587}
588
589static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
590{
591 return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
592}
593
594static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
595{
596 return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
597}
598
599static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
600{
601 return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
602}
603
 604/**
 605 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
606 * @genpd: PM domain to power off, if possible.
607 *
608 * Check if the given PM domain can be powered off (during system suspend or
 609 * hibernation) and do that if so. Also, in that case propagate to its masters.
610 *
611 * This function is only called in "noirq" stages of system power transitions,
612 * so it need not acquire locks (all of the "noirq" callbacks are executed
613 * sequentially, so it is guaranteed that it will never run twice in parallel).
614 */
615static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
616{
 617 struct gpd_link *link;
 618
 619 if (genpd->status == GPD_STATE_POWER_OFF)
620 return;
621
622 if (genpd->suspended_count != genpd->device_count
623 || atomic_read(&genpd->sd_count) > 0)
624 return;
625
626 if (genpd->power_off)
627 genpd->power_off(genpd);
628
 629 genpd->status = GPD_STATE_POWER_OFF;
630
631 list_for_each_entry(link, &genpd->slave_links, slave_node) {
632 genpd_sd_counter_dec(link->master);
633 pm_genpd_sync_poweroff(link->master);
634 }
635}
636
637/**
638 * resume_needed - Check whether to resume a device before system suspend.
639 * @dev: Device to check.
640 * @genpd: PM domain the device belongs to.
641 *
642 * There are two cases in which a device that can wake up the system from sleep
643 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
644 * to wake up the system and it has to remain active for this purpose while the
645 * system is in the sleep state and (2) if the device is not enabled to wake up
646 * the system from sleep states and it generally doesn't generate wakeup signals
647 * by itself (those signals are generated on its behalf by other parts of the
648 * system). In the latter case it may be necessary to reconfigure the device's
649 * wakeup settings during system suspend, because it may have been set up to
650 * signal remote wakeup from the system's working state as needed by runtime PM.
651 * Return 'true' in either of the above cases.
652 */
653static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
654{
655 bool active_wakeup;
656
657 if (!device_can_wakeup(dev))
658 return false;
659
 660 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
661 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
662}
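/*
 * Decision table for resume_needed(), derived from the code above
 * ("wakeup cb" is the value returned by the active_wakeup callback):
 *
 *	can_wakeup	may_wakeup	wakeup cb	resume?
 *	    no		    -		    -		no
 *	   yes		   yes		   true		yes	(case 1)
 *	   yes		   yes		   false	no
 *	   yes		   no		   true		no
 *	   yes		   no		   false	yes	(case 2)
 */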
663
664/**
665 * pm_genpd_prepare - Start power transition of a device in a PM domain.
666 * @dev: Device to start the transition of.
667 *
668 * Start a power transition of a device (during a system-wide power transition)
669 * under the assumption that its pm_domain field points to the domain member of
670 * an object of type struct generic_pm_domain representing a PM domain
671 * consisting of I/O devices.
672 */
673static int pm_genpd_prepare(struct device *dev)
674{
675 struct generic_pm_domain *genpd;
 676 int ret;
677
678 dev_dbg(dev, "%s()\n", __func__);
679
680 genpd = dev_to_genpd(dev);
681 if (IS_ERR(genpd))
682 return -EINVAL;
683
684 /*
685 * If a wakeup request is pending for the device, it should be woken up
686 * at this point and a system wakeup event should be reported if it's
687 * set up to wake up the system from sleep states.
688 */
689 pm_runtime_get_noresume(dev);
690 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
691 pm_wakeup_event(dev, 0);
692
693 if (pm_wakeup_pending()) {
694 pm_runtime_put_sync(dev);
695 return -EBUSY;
696 }
697
698 if (resume_needed(dev, genpd))
699 pm_runtime_resume(dev);
700
 701 genpd_acquire_lock(genpd);
702
703 if (genpd->prepared_count++ == 0)
704 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
705
706 genpd_release_lock(genpd);
707
708 if (genpd->suspend_power_off) {
 709 pm_runtime_put_noidle(dev);
710 return 0;
711 }
712
713 /*
714 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
715 * so pm_genpd_poweron() will return immediately, but if the device
 716 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
 717 * to make it operational.
 718 */
 719 pm_runtime_resume(dev);
720 __pm_runtime_disable(dev, false);
721
722 ret = pm_generic_prepare(dev);
723 if (ret) {
724 mutex_lock(&genpd->lock);
725
726 if (--genpd->prepared_count == 0)
727 genpd->suspend_power_off = false;
728
729 mutex_unlock(&genpd->lock);
 730 pm_runtime_enable(dev);
 731 }
732
733 pm_runtime_put_sync(dev);
 734 return ret;
735}
736
737/**
738 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
739 * @dev: Device to suspend.
740 *
741 * Suspend a device under the assumption that its pm_domain field points to the
742 * domain member of an object of type struct generic_pm_domain representing
743 * a PM domain consisting of I/O devices.
744 */
745static int pm_genpd_suspend(struct device *dev)
746{
747 struct generic_pm_domain *genpd;
748
749 dev_dbg(dev, "%s()\n", __func__);
750
751 genpd = dev_to_genpd(dev);
752 if (IS_ERR(genpd))
753 return -EINVAL;
754
 755 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
756}
757
758/**
759 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
760 * @dev: Device to suspend.
761 *
762 * Carry out a late suspend of a device under the assumption that its
763 * pm_domain field points to the domain member of an object of type
764 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
765 */
766static int pm_genpd_suspend_noirq(struct device *dev)
767{
768 struct generic_pm_domain *genpd;
769 int ret;
770
771 dev_dbg(dev, "%s()\n", __func__);
772
773 genpd = dev_to_genpd(dev);
774 if (IS_ERR(genpd))
775 return -EINVAL;
776
777 if (genpd->suspend_power_off)
778 return 0;
779
 780 ret = genpd_suspend_late(genpd, dev);
781 if (ret)
782 return ret;
783
 784 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
785 return 0;
786
 787 genpd_stop_dev(genpd, dev);
788
789 /*
790 * Since all of the "noirq" callbacks are executed sequentially, it is
791 * guaranteed that this function will never run twice in parallel for
792 * the same PM domain, so it is not necessary to use locking here.
793 */
794 genpd->suspended_count++;
795 pm_genpd_sync_poweroff(genpd);
796
797 return 0;
798}
799
800/**
801 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
802 * @dev: Device to resume.
803 *
804 * Carry out an early resume of a device under the assumption that its
805 * pm_domain field points to the domain member of an object of type
806 * struct generic_pm_domain representing a power domain consisting of I/O
807 * devices.
808 */
809static int pm_genpd_resume_noirq(struct device *dev)
810{
811 struct generic_pm_domain *genpd;
812
813 dev_dbg(dev, "%s()\n", __func__);
814
815 genpd = dev_to_genpd(dev);
816 if (IS_ERR(genpd))
817 return -EINVAL;
818
819 if (genpd->suspend_power_off)
820 return 0;
821
822 /*
823 * Since all of the "noirq" callbacks are executed sequentially, it is
824 * guaranteed that this function will never run twice in parallel for
825 * the same PM domain, so it is not necessary to use locking here.
826 */
827 pm_genpd_poweron(genpd);
828 genpd->suspended_count--;
 829 genpd_start_dev(genpd, dev);
 830
 831 return genpd_resume_early(genpd, dev);
832}
833
834/**
835 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
836 * @dev: Device to resume.
837 *
838 * Resume a device under the assumption that its pm_domain field points to the
839 * domain member of an object of type struct generic_pm_domain representing
840 * a power domain consisting of I/O devices.
841 */
842static int pm_genpd_resume(struct device *dev)
843{
844 struct generic_pm_domain *genpd;
845
846 dev_dbg(dev, "%s()\n", __func__);
847
848 genpd = dev_to_genpd(dev);
849 if (IS_ERR(genpd))
850 return -EINVAL;
851
 852 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
853}
854
855/**
856 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
857 * @dev: Device to freeze.
858 *
859 * Freeze a device under the assumption that its pm_domain field points to the
860 * domain member of an object of type struct generic_pm_domain representing
861 * a power domain consisting of I/O devices.
862 */
863static int pm_genpd_freeze(struct device *dev)
864{
865 struct generic_pm_domain *genpd;
866
867 dev_dbg(dev, "%s()\n", __func__);
868
869 genpd = dev_to_genpd(dev);
870 if (IS_ERR(genpd))
871 return -EINVAL;
872
 873 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
874}
875
876/**
877 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
878 * @dev: Device to freeze.
879 *
880 * Carry out a late freeze of a device under the assumption that its
881 * pm_domain field points to the domain member of an object of type
882 * struct generic_pm_domain representing a power domain consisting of I/O
883 * devices.
884 */
885static int pm_genpd_freeze_noirq(struct device *dev)
886{
887 struct generic_pm_domain *genpd;
888 int ret;
889
890 dev_dbg(dev, "%s()\n", __func__);
891
892 genpd = dev_to_genpd(dev);
893 if (IS_ERR(genpd))
894 return -EINVAL;
895
896 if (genpd->suspend_power_off)
897 return 0;
898
 899 ret = genpd_freeze_late(genpd, dev);
900 if (ret)
901 return ret;
902
 903 genpd_stop_dev(genpd, dev);
904
905 return 0;
906}
907
908/**
909 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
910 * @dev: Device to thaw.
911 *
912 * Carry out an early thaw of a device under the assumption that its
913 * pm_domain field points to the domain member of an object of type
914 * struct generic_pm_domain representing a power domain consisting of I/O
915 * devices.
916 */
917static int pm_genpd_thaw_noirq(struct device *dev)
918{
919 struct generic_pm_domain *genpd;
920
921 dev_dbg(dev, "%s()\n", __func__);
922
923 genpd = dev_to_genpd(dev);
924 if (IS_ERR(genpd))
925 return -EINVAL;
926
927 if (genpd->suspend_power_off)
928 return 0;
929
 930 genpd_start_dev(genpd, dev);
 931
 932 return genpd_thaw_early(genpd, dev);
933}
934
935/**
936 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
937 * @dev: Device to thaw.
938 *
939 * Thaw a device under the assumption that its pm_domain field points to the
940 * domain member of an object of type struct generic_pm_domain representing
941 * a power domain consisting of I/O devices.
942 */
943static int pm_genpd_thaw(struct device *dev)
944{
945 struct generic_pm_domain *genpd;
946
947 dev_dbg(dev, "%s()\n", __func__);
948
949 genpd = dev_to_genpd(dev);
950 if (IS_ERR(genpd))
951 return -EINVAL;
952
 953 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
954}
955
956/**
957 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
958 * @dev: Device to resume.
959 *
960 * Carry out an early restore of a device under the assumption that its
961 * pm_domain field points to the domain member of an object of type
962 * struct generic_pm_domain representing a power domain consisting of I/O
963 * devices.
964 */
965static int pm_genpd_restore_noirq(struct device *dev)
966{
967 struct generic_pm_domain *genpd;
968
969 dev_dbg(dev, "%s()\n", __func__);
970
971 genpd = dev_to_genpd(dev);
972 if (IS_ERR(genpd))
973 return -EINVAL;
974
975 /*
976 * Since all of the "noirq" callbacks are executed sequentially, it is
977 * guaranteed that this function will never run twice in parallel for
978 * the same PM domain, so it is not necessary to use locking here.
979 */
 980 genpd->status = GPD_STATE_POWER_OFF;
981 if (genpd->suspend_power_off) {
982 /*
983 * The boot kernel might put the domain into the power on state,
984 * so make sure it really is powered off.
985 */
986 if (genpd->power_off)
987 genpd->power_off(genpd);
988 return 0;
989 }
990
991 pm_genpd_poweron(genpd);
992 genpd->suspended_count--;
 993 genpd_start_dev(genpd, dev);
 994
 995 return genpd_resume_early(genpd, dev);
996}
997
998/**
999 * pm_genpd_complete - Complete power transition of a device in a power domain.
1000 * @dev: Device to complete the transition of.
1001 *
1002 * Complete a power transition of a device (during a system-wide power
1003 * transition) under the assumption that its pm_domain field points to the
1004 * domain member of an object of type struct generic_pm_domain representing
1005 * a power domain consisting of I/O devices.
1006 */
1007static void pm_genpd_complete(struct device *dev)
1008{
1009 struct generic_pm_domain *genpd;
1010 bool run_complete;
1011
1012 dev_dbg(dev, "%s()\n", __func__);
1013
1014 genpd = dev_to_genpd(dev);
1015 if (IS_ERR(genpd))
1016 return;
1017
1018 mutex_lock(&genpd->lock);
1019
1020 run_complete = !genpd->suspend_power_off;
1021 if (--genpd->prepared_count == 0)
1022 genpd->suspend_power_off = false;
1023
1024 mutex_unlock(&genpd->lock);
1025
1026 if (run_complete) {
1027 pm_generic_complete(dev);
 1028 pm_runtime_set_active(dev);
 1029 pm_runtime_enable(dev);
 1030 pm_runtime_idle(dev);
1031 }
1032}
1033
1034#else
1035
1036#define pm_genpd_prepare NULL
1037#define pm_genpd_suspend NULL
1038#define pm_genpd_suspend_noirq NULL
1039#define pm_genpd_resume_noirq NULL
1040#define pm_genpd_resume NULL
1041#define pm_genpd_freeze NULL
1042#define pm_genpd_freeze_noirq NULL
1043#define pm_genpd_thaw_noirq NULL
1044#define pm_genpd_thaw NULL
 1045#define pm_genpd_restore_noirq NULL
1046#define pm_genpd_complete NULL
1047
1048#endif /* CONFIG_PM_SLEEP */
1049
1050/**
1051 * pm_genpd_add_device - Add a device to an I/O PM domain.
1052 * @genpd: PM domain to add the device to.
1053 * @dev: Device to be added.
1054 */
1055int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1056{
 1057 struct generic_pm_domain_data *gpd_data;
 1058 struct pm_domain_data *pdd;
1059 int ret = 0;
1060
1061 dev_dbg(dev, "%s()\n", __func__);
1062
1063 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1064 return -EINVAL;
1065
 1066 genpd_acquire_lock(genpd);
 1067
 1068 if (genpd->status == GPD_STATE_POWER_OFF) {
1069 ret = -EINVAL;
1070 goto out;
1071 }
1072
1073 if (genpd->prepared_count > 0) {
1074 ret = -EAGAIN;
1075 goto out;
1076 }
1077
1078 list_for_each_entry(pdd, &genpd->dev_list, list_node)
1079 if (pdd->dev == dev) {
1080 ret = -EINVAL;
1081 goto out;
1082 }
1083
1084 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1085 if (!gpd_data) {
1086 ret = -ENOMEM;
1087 goto out;
1088 }
1089
 1090 genpd->device_count++;
 1091
 1092 dev->pm_domain = &genpd->domain;
 1093 dev_pm_get_subsys_data(dev);
1094 dev->power.subsys_data->domain_data = &gpd_data->base;
1095 gpd_data->base.dev = dev;
1096 gpd_data->need_restore = false;
1097 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1098
1099 out:
 1100 genpd_release_lock(genpd);
1101
1102 return ret;
1103}
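/*
 * Minimal usage sketch (hypothetical names, not part of this file): once a
 * platform device has been registered, platform code binds it to its domain:
 *
 *	ret = pm_genpd_add_device(&my_pd, &my_platform_device.dev);
 *	if (ret)
 *		pr_err("failed to add device to PM domain: %d\n", ret);
 *
 * As the code above shows, this fails with -EINVAL while the domain is
 * powered off and with -EAGAIN while a system suspend is in preparation
 * (prepared_count > 0).
 */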
1104
1105/**
1106 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1107 * @genpd: PM domain to remove the device from.
1108 * @dev: Device to be removed.
1109 */
1110int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1111 struct device *dev)
1112{
 1113 struct pm_domain_data *pdd;
1114 int ret = -EINVAL;
1115
1116 dev_dbg(dev, "%s()\n", __func__);
1117
1118 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1119 return -EINVAL;
1120
 1121 genpd_acquire_lock(genpd);
 1122
1123 if (genpd->prepared_count > 0) {
1124 ret = -EAGAIN;
1125 goto out;
1126 }
1127
1128 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
1129 if (pdd->dev != dev)
1130 continue;
1131
1132 list_del_init(&pdd->list_node);
1133 pdd->dev = NULL;
1134 dev_pm_put_subsys_data(dev);
 1135 dev->pm_domain = NULL;
 1136 kfree(to_gpd_data(pdd));
 1137
 1138 genpd->device_count--;
1139
1140 ret = 0;
1141 break;
1142 }
1143
 1144 out:
 1145 genpd_release_lock(genpd);
1146
1147 return ret;
1148}
1149
1150/**
1151 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1152 * @genpd: Master PM domain to add the subdomain to.
 1153 * @subdomain: Subdomain to be added.
1154 */
1155int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 1156 struct generic_pm_domain *subdomain)
 1157{
 1158 struct gpd_link *link;
1159 int ret = 0;
1160
 1161 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1162 return -EINVAL;
1163
1164 start:
1165 genpd_acquire_lock(genpd);
 1166 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 1167
1168 if (subdomain->status != GPD_STATE_POWER_OFF
1169 && subdomain->status != GPD_STATE_ACTIVE) {
1170 mutex_unlock(&subdomain->lock);
1171 genpd_release_lock(genpd);
1172 goto start;
1173 }
1174
1175 if (genpd->status == GPD_STATE_POWER_OFF
 1176 && subdomain->status != GPD_STATE_POWER_OFF) {
1177 ret = -EINVAL;
1178 goto out;
1179 }
1180
 1181 list_for_each_entry(link, &genpd->slave_links, slave_node) {
 1182 if (link->slave == subdomain && link->master == genpd) {
1183 ret = -EINVAL;
1184 goto out;
1185 }
1186 }
1187
1188 link = kzalloc(sizeof(*link), GFP_KERNEL);
1189 if (!link) {
1190 ret = -ENOMEM;
1191 goto out;
1192 }
1193 link->master = genpd;
1194 list_add_tail(&link->master_node, &genpd->master_links);
1195 link->slave = subdomain;
1196 list_add_tail(&link->slave_node, &subdomain->slave_links);
1197 if (subdomain->status != GPD_STATE_POWER_OFF)
 1198 genpd_sd_counter_inc(genpd);
 1199
 1200 out:
 1201 mutex_unlock(&subdomain->lock);
 1202 genpd_release_lock(genpd);
1203
1204 return ret;
1205}
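/*
 * Illustrative sketch (hypothetical domains): making "my_sub_pd" a subdomain
 * of "my_pd", so that my_pd is counted as busy while my_sub_pd is powered up:
 *
 *	ret = pm_genpd_add_subdomain(&my_pd, &my_sub_pd);
 *
 * The call fails with -EINVAL if my_pd is powered off while my_sub_pd is not,
 * or if the link between the two domains already exists.
 */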
1206
1207/**
1208 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1209 * @genpd: Master PM domain to remove the subdomain from.
 1210 * @subdomain: Subdomain to be removed.
1211 */
1212int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 1213 struct generic_pm_domain *subdomain)
 1214{
 1215 struct gpd_link *link;
1216 int ret = -EINVAL;
1217
 1218 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1219 return -EINVAL;
1220
1221 start:
1222 genpd_acquire_lock(genpd);
 1223
1224 list_for_each_entry(link, &genpd->master_links, master_node) {
1225 if (link->slave != subdomain)
1226 continue;
1227
1228 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1229
1230 if (subdomain->status != GPD_STATE_POWER_OFF
1231 && subdomain->status != GPD_STATE_ACTIVE) {
1232 mutex_unlock(&subdomain->lock);
1233 genpd_release_lock(genpd);
1234 goto start;
1235 }
1236
1237 list_del(&link->master_node);
1238 list_del(&link->slave_node);
1239 kfree(link);
 1240 if (subdomain->status != GPD_STATE_POWER_OFF)
1241 genpd_sd_counter_dec(genpd);
1242
1243 mutex_unlock(&subdomain->lock);
1244
1245 ret = 0;
1246 break;
1247 }
1248
 1249 genpd_release_lock(genpd);
1250
1251 return ret;
1252}
1253
1254/**
1255 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1256 * @dev: Device to add the callbacks to.
1257 * @ops: Set of callbacks to add.
1258 */
1259int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops)
1260{
1261 struct pm_domain_data *pdd;
1262 int ret = 0;
1263
1264 if (!(dev && dev->power.subsys_data && ops))
1265 return -EINVAL;
1266
1267 pm_runtime_disable(dev);
1268 device_pm_lock();
1269
1270 pdd = dev->power.subsys_data->domain_data;
1271 if (pdd) {
1272 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1273
1274 gpd_data->ops = *ops;
1275 } else {
1276 ret = -EINVAL;
1277 }
1278
1279 device_pm_unlock();
1280 pm_runtime_enable(dev);
1281
1282 return ret;
1283}
1284EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
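/*
 * Usage sketch (hypothetical driver callbacks): a driver or platform can
 * override the default behavior for a single device by installing its own
 * struct gpd_dev_ops, e.g. custom register save/restore routines:
 *
 *	static struct gpd_dev_ops my_dev_ops = {
 *		.save_state = my_save_registers,
 *		.restore_state = my_restore_registers,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(dev, &my_dev_ops);
 *
 * The default domain callbacks installed by pm_genpd_init() check these
 * per-device operations first and fall back to the driver's dev_pm_ops.
 */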
1285
1286/**
1287 * pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1288 * @dev: Device to remove the callbacks from.
1289 */
1290int pm_genpd_remove_callbacks(struct device *dev)
1291{
1292 struct pm_domain_data *pdd;
1293 int ret = 0;
1294
1295 if (!(dev && dev->power.subsys_data))
1296 return -EINVAL;
1297
1298 pm_runtime_disable(dev);
1299 device_pm_lock();
1300
1301 pdd = dev->power.subsys_data->domain_data;
1302 if (pdd) {
1303 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1304
1305 gpd_data->ops = (struct gpd_dev_ops){ 0 };
1306 } else {
1307 ret = -EINVAL;
1308 }
1309
1310 device_pm_unlock();
1311 pm_runtime_enable(dev);
1312
1313 return ret;
1314}
1315EXPORT_SYMBOL_GPL(pm_genpd_remove_callbacks);
1316
1317/* Default device callbacks for generic PM domains. */
1318
1319/**
 1320 * pm_genpd_default_save_state - Default "save device state" for PM domains.
1321 * @dev: Device to handle.
1322 */
1323static int pm_genpd_default_save_state(struct device *dev)
1324{
1325 int (*cb)(struct device *__dev);
1326 struct device_driver *drv = dev->driver;
1327
1328 cb = dev_gpd_data(dev)->ops.save_state;
1329 if (cb)
1330 return cb(dev);
1331
1332 if (drv && drv->pm && drv->pm->runtime_suspend)
1333 return drv->pm->runtime_suspend(dev);
1334
1335 return 0;
1336}
1337
1338/**
 1339 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1340 * @dev: Device to handle.
1341 */
1342static int pm_genpd_default_restore_state(struct device *dev)
1343{
1344 int (*cb)(struct device *__dev);
1345 struct device_driver *drv = dev->driver;
1346
1347 cb = dev_gpd_data(dev)->ops.restore_state;
1348 if (cb)
1349 return cb(dev);
1350
1351 if (drv && drv->pm && drv->pm->runtime_resume)
1352 return drv->pm->runtime_resume(dev);
1353
1354 return 0;
1355}
1356
1357/**
 1358 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
1359 * @dev: Device to handle.
1360 */
1361static int pm_genpd_default_suspend(struct device *dev)
1362{
 1363 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1364
1365 return cb ? cb(dev) : pm_generic_suspend(dev);
1366}
1367
1368/**
 1369 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
1370 * @dev: Device to handle.
1371 */
1372static int pm_genpd_default_suspend_late(struct device *dev)
1373{
 1374 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1375
1376 return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
1377}
1378
1379/**
 1380 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
1381 * @dev: Device to handle.
1382 */
1383static int pm_genpd_default_resume_early(struct device *dev)
1384{
 1385 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1386
1387 return cb ? cb(dev) : pm_generic_resume_noirq(dev);
1388}
1389
1390/**
 1391 * pm_genpd_default_resume - Default "device resume" for PM domains.
1392 * @dev: Device to handle.
1393 */
1394static int pm_genpd_default_resume(struct device *dev)
1395{
 1396 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1397
1398 return cb ? cb(dev) : pm_generic_resume(dev);
1399}
1400
1401/**
 1402 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
1403 * @dev: Device to handle.
1404 */
1405static int pm_genpd_default_freeze(struct device *dev)
1406{
1407 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1408
1409 return cb ? cb(dev) : pm_generic_freeze(dev);
1410}
1411
1412/**
 1413 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
1414 * @dev: Device to handle.
1415 */
1416static int pm_genpd_default_freeze_late(struct device *dev)
1417{
1418 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1419
1420 return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
1421}
1422
1423/**
 1424 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
1425 * @dev: Device to handle.
1426 */
1427static int pm_genpd_default_thaw_early(struct device *dev)
1428{
1429 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1430
1431 return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
1432}
1433
1434/**
 1435 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
1436 * @dev: Device to handle.
1437 */
1438static int pm_genpd_default_thaw(struct device *dev)
1439{
1440 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1441
1442 return cb ? cb(dev) : pm_generic_thaw(dev);
1443}
1444
1445/**
1446 * pm_genpd_init - Initialize a generic I/O PM domain object.
1447 * @genpd: PM domain object to initialize.
1448 * @gov: PM domain governor to associate with the domain (may be NULL).
 1449 * @is_off: Whether the domain starts out in the "power off" state.
1450 */
1451void pm_genpd_init(struct generic_pm_domain *genpd,
1452 struct dev_power_governor *gov, bool is_off)
1453{
1454 if (IS_ERR_OR_NULL(genpd))
1455 return;
1456
1457 INIT_LIST_HEAD(&genpd->master_links);
1458 INIT_LIST_HEAD(&genpd->slave_links);
 1459 INIT_LIST_HEAD(&genpd->dev_list);
1460 mutex_init(&genpd->lock);
1461 genpd->gov = gov;
1462 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1463 genpd->in_progress = 0;
 1464 atomic_set(&genpd->sd_count, 0);
1465 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1466 init_waitqueue_head(&genpd->status_wait_queue);
1467 genpd->poweroff_task = NULL;
1468 genpd->resume_count = 0;
1469 genpd->device_count = 0;
1470 genpd->suspended_count = 0;
1471 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1472 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1473 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1474 genpd->domain.ops.prepare = pm_genpd_prepare;
1475 genpd->domain.ops.suspend = pm_genpd_suspend;
1476 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1477 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1478 genpd->domain.ops.resume = pm_genpd_resume;
1479 genpd->domain.ops.freeze = pm_genpd_freeze;
1480 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1481 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1482 genpd->domain.ops.thaw = pm_genpd_thaw;
1483 genpd->domain.ops.poweroff = pm_genpd_suspend;
1484 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
 1485 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
 1486 genpd->domain.ops.restore = pm_genpd_resume;
 1487 genpd->domain.ops.complete = pm_genpd_complete;
1488 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1489 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
 1490 genpd->dev_ops.suspend = pm_genpd_default_suspend;
 1491 genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
 1492 genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
 1493 genpd->dev_ops.resume = pm_genpd_default_resume;
1494 genpd->dev_ops.freeze = pm_genpd_default_freeze;
1495 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
1496 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
1497 genpd->dev_ops.thaw = pm_genpd_default_thaw;
1498 mutex_lock(&gpd_list_lock);
1499 list_add(&genpd->gpd_list_node, &gpd_list);
1500 mutex_unlock(&gpd_list_lock);
1501}
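/*
 * Illustrative sketch (hypothetical platform code, not part of this file):
 * a platform defines its power switch callbacks, registers the domain and
 * then adds devices and subdomains to it:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		...hardware-specific power-up sequence...
 *		return 0;
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		...hardware-specific power-down sequence...
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, true);
 *
 * Passing true as @is_off means the domain starts out in the
 * GPD_STATE_POWER_OFF state; a NULL @gov selects no governor.
 */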