PM / Domains: Automatically update overoptimistic latency information
[deliverable/linux.git] / drivers / base / power / domain.c
CommitLineData
f721889f
RW
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
17b75eca
RW
16#include <linux/sched.h>
17#include <linux/suspend.h>
d5e4cbfe
RW
18#include <linux/export.h>
19
/*
 * GENPD_DEV_CALLBACK - Invoke a device PM callback of the given @type for @dev.
 *
 * The callback is looked up first in the domain-wide @genpd->dev_ops and,
 * only if not set there, in the per-device ops held in the device's
 * generic_pm_domain_data (so the domain-level op takes precedence).
 * Evaluates to the callback's return value, or (type)0 when no callback
 * is installed at either level.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})
f721889f 35
0140d8bd
RW
/*
 * GENPD_DEV_TIMED_CALLBACK - Invoke a device PM callback and time it.
 *
 * Wraps GENPD_DEV_CALLBACK() with ktime-based measurement.  If the measured
 * duration exceeds the latency stored in the device's timing data (@field),
 * the stored value is raised so that overoptimistic latency information is
 * corrected automatically, and a warning naming the operation (@name) is
 * printed.  Evaluates to the callback's return value.
 */
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);		\
	if (__elapsed > __gpd_data->td.field) {					\
		__gpd_data->td.field = __elapsed;				\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			 __elapsed);						\
	}									\
	__retval;								\
})
49
5125bbf3
RW
/* All registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
52
5248051b
RW
53#ifdef CONFIG_PM
54
b02c999a 55struct generic_pm_domain *dev_to_genpd(struct device *dev)
5248051b
RW
56{
57 if (IS_ERR_OR_NULL(dev->pm_domain))
58 return ERR_PTR(-EINVAL);
59
596ba34b 60 return pd_to_genpd(dev->pm_domain);
5248051b 61}
f721889f 62
d5e4cbfe
RW
/* Stop @dev via the domain's .stop callback, tracking worst-case latency. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

/* Start @dev via the domain's .start callback, tracking worst-case latency. */
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

/* Save @dev's state via .save_state, tracking worst-case latency. */
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

/* Restore @dev's state via .restore_state, tracking worst-case latency. */
static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}
87
c4bb3160 88static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
f721889f 89{
c4bb3160
RW
90 bool ret = false;
91
92 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
93 ret = !!atomic_dec_and_test(&genpd->sd_count);
94
95 return ret;
96}
97
/*
 * Increment the subdomain counter of @genpd.
 *
 * The barrier orders the increment against subsequent memory accesses;
 * sd_count is read without the domain lock in the power-off paths.
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}
103
17b75eca
RW
/*
 * genpd_acquire_lock - Acquire @genpd->lock once the domain has settled.
 *
 * Takes the mutex and, if the domain is in a transitional state, drops it
 * and sleeps on the status wait queue until the domain reaches either the
 * active or the power-off state.  Returns with the mutex held.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		/* Drop the lock so the transitioning task can make progress. */
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}
127
/* Counterpart of genpd_acquire_lock(). */
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}
132
c6d22b37
RW
/*
 * Mark @genpd active unless a device resume is in progress, in which case
 * the resuming path performs the final status update itself.
 */
static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}
138
/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 *
 * Must be called with @genpd->lock held; drops and re-acquires it while
 * waiting on the status wait queue and around the masters' power-up.
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	/* Nothing to do if already active, or powered down for suspend. */
	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for master" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		/* Correct overoptimistic power-on latency information. */
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns)
			genpd->power_on_latency_ns = elapsed_ns;
	}

	genpd_set_active(genpd);

	return 0;

 err:
	/* Drop the references taken on the masters powered up so far. */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}
226
227/**
5063ce15 228 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
3f241775
RW
229 * @genpd: PM domain to power up.
230 */
231int pm_genpd_poweron(struct generic_pm_domain *genpd)
232{
233 int ret;
234
235 mutex_lock(&genpd->lock);
236 ret = __pm_genpd_poweron(genpd);
237 mutex_unlock(&genpd->lock);
238 return ret;
5248051b
RW
239}
240
241#endif /* CONFIG_PM */
242
243#ifdef CONFIG_PM_RUNTIME
244
f721889f
RW
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Must be called with @genpd->lock held; drops it around the device
 * callbacks and re-acquires it afterwards.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	/* Nothing to do if the state has already been saved. */
	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	/* The device must be operational while its state is saved. */
	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}
274
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Must be called with @genpd->lock held; drops it around the device
 * callbacks and re-acquires it afterwards.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;

	/* Nothing to restore if no state was saved. */
	if (!gpd_data->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	/* The device must be operational while its state is restored. */
	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	gpd_data->need_restore = false;
}
300
c6d22b37
RW
/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}
315
56375fd4
RW
/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}
328
f721889f
RW
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Must be called with @genpd->lock held; __pm_genpd_save_device() drops and
 * re-acquires it, which is why the loop below re-checks for aborts.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	/* Subdomains still need power from this domain. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	/* Let the governor veto the power off if it would not pay off. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		/* The lock was dropped; re-check for concurrent power-ons. */
		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		/* A concurrent caller asked us to redo the checks. */
		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		/* Correct overoptimistic power-off latency information. */
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns)
			genpd->power_off_latency_ns = elapsed_ns;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd->power_off_time = ktime_get();

	/* Update PM QoS information for devices in the domain. */
	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		struct gpd_timing_data *td = &to_gpd_data(pdd)->td;

		pm_runtime_update_max_time_suspended(pdd->dev,
					td->start_latency_ns +
					td->restore_state_latency_ns +
					genpd->power_on_latency_ns);
	}

	/* Drop our references on the masters and let them power off too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
458
459/**
460 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
461 * @work: Work structure used for scheduling the execution of this function.
462 */
463static void genpd_power_off_work_fn(struct work_struct *work)
464{
465 struct generic_pm_domain *genpd;
466
467 genpd = container_of(work, struct generic_pm_domain, power_off_work);
468
17b75eca 469 genpd_acquire_lock(genpd);
f721889f 470 pm_genpd_poweroff(genpd);
17b75eca 471 genpd_release_lock(genpd);
f721889f
RW
472}
473
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* Let the governor reject the suspend if stopping would not pay off. */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	pm_runtime_update_max_time_suspended(dev,
				dev_gpd_data(dev)->td.start_latency_ns);

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}
522
f721889f
RW
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		goto out;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	/* Wait for any concurrent pm_genpd_poweroff() to get out of the way. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

 out:
	genpd_start_dev(genpd, dev);

	return 0;
}
585
17f2ae7f
RW
/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 *
 * Queues a power-off work item for every registered generic PM domain;
 * pm_genpd_poweroff() itself decides whether each domain may actually go down.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
600
f721889f
RW
601#else
602
603static inline void genpd_power_off_work_fn(struct work_struct *work) {}
604
605#define pm_genpd_runtime_suspend NULL
606#define pm_genpd_runtime_resume NULL
607
608#endif /* CONFIG_PM_RUNTIME */
609
596ba34b
RW
610#ifdef CONFIG_PM_SLEEP
611
d5e4cbfe
RW
/* Ask whether @dev should stay active for wakeup while the system sleeps. */
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
617
d23b9b00
RW
/* Invoke the .suspend callback for @dev (domain-level op takes precedence). */
static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

/* Invoke the .suspend_late callback for @dev. */
static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

/* Invoke the .resume_early callback for @dev. */
static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

/* Invoke the .resume callback for @dev. */
static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

/* Invoke the .freeze callback for @dev. */
static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

/* Invoke the .freeze_late callback for @dev. */
static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

/* Invoke the .thaw_early callback for @dev. */
static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

/* Invoke the .thaw callback for @dev. */
static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}
657
/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	/* Stay up while any device or subdomain still needs the domain. */
	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	/* Drop the masters' references and try to power them off as well. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}
690
4ecd6e65
RW
691/**
692 * resume_needed - Check whether to resume a device before system suspend.
693 * @dev: Device to check.
694 * @genpd: PM domain the device belongs to.
695 *
696 * There are two cases in which a device that can wake up the system from sleep
697 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
698 * to wake up the system and it has to remain active for this purpose while the
699 * system is in the sleep state and (2) if the device is not enabled to wake up
700 * the system from sleep states and it generally doesn't generate wakeup signals
701 * by itself (those signals are generated on its behalf by other parts of the
702 * system). In the latter case it may be necessary to reconfigure the device's
703 * wakeup settings during system suspend, because it may have been set up to
704 * signal remote wakeup from the system's working state as needed by runtime PM.
705 * Return 'true' in either of the above cases.
706 */
707static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
708{
709 bool active_wakeup;
710
711 if (!device_can_wakeup(dev))
712 return false;
713
d5e4cbfe 714 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
4ecd6e65
RW
715 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
716}
717
596ba34b
RW
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	/* The first device to prepare fixes the suspend_power_off policy. */
	if (genpd->prepared_count++ == 0)
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		/* Roll back the prepared count on failure. */
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}
790
791/**
792 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
793 * @dev: Device to suspend.
794 *
795 * Suspend a device under the assumption that its pm_domain field points to the
796 * domain member of an object of type struct generic_pm_domain representing
797 * a PM domain consisting of I/O devices.
798 */
799static int pm_genpd_suspend(struct device *dev)
800{
801 struct generic_pm_domain *genpd;
802
803 dev_dbg(dev, "%s()\n", __func__);
804
805 genpd = dev_to_genpd(dev);
806 if (IS_ERR(genpd))
807 return -EINVAL;
808
d23b9b00 809 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
596ba34b
RW
810}
811
/**
 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = genpd_suspend_late(genpd, dev);
	if (ret)
		return ret;

	/* Keep the device operational if it must provide system wakeup. */
	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}
853
/**
 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	genpd_start_dev(genpd, dev);

	return genpd_resume_early(genpd, dev);
}
887
888/**
889 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
890 * @dev: Device to resume.
891 *
892 * Resume a device under the assumption that its pm_domain field points to the
893 * domain member of an object of type struct generic_pm_domain representing
894 * a power domain consisting of I/O devices.
895 */
896static int pm_genpd_resume(struct device *dev)
897{
898 struct generic_pm_domain *genpd;
899
900 dev_dbg(dev, "%s()\n", __func__);
901
902 genpd = dev_to_genpd(dev);
903 if (IS_ERR(genpd))
904 return -EINVAL;
905
d23b9b00 906 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
596ba34b
RW
907}
908
909/**
910 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
911 * @dev: Device to freeze.
912 *
913 * Freeze a device under the assumption that its pm_domain field points to the
914 * domain member of an object of type struct generic_pm_domain representing
915 * a power domain consisting of I/O devices.
916 */
917static int pm_genpd_freeze(struct device *dev)
918{
919 struct generic_pm_domain *genpd;
920
921 dev_dbg(dev, "%s()\n", __func__);
922
923 genpd = dev_to_genpd(dev);
924 if (IS_ERR(genpd))
925 return -EINVAL;
926
d23b9b00 927 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
596ba34b
RW
928}
929
/**
 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = genpd_freeze_late(genpd, dev);
	if (ret)
		return ret;

	/* Note: the domain itself is not powered off for hibernation freeze. */
	genpd_stop_dev(genpd, dev);

	return 0;
}
961
/**
 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	genpd_start_dev(genpd, dev);

	return genpd_thaw_early(genpd, dev);
}
988
989/**
990 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
991 * @dev: Device to thaw.
992 *
993 * Thaw a device under the assumption that its pm_domain field points to the
994 * domain member of an object of type struct generic_pm_domain representing
995 * a power domain consisting of I/O devices.
996 */
997static int pm_genpd_thaw(struct device *dev)
998{
999 struct generic_pm_domain *genpd;
1000
1001 dev_dbg(dev, "%s()\n", __func__);
1002
1003 genpd = dev_to_genpd(dev);
1004 if (IS_ERR(genpd))
1005 return -EINVAL;
1006
d23b9b00 1007 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
596ba34b
RW
1008}
1009
/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->status = GPD_STATE_POWER_OFF;
	if (genpd->suspend_power_off) {
		/*
		 * The boot kernel might put the domain into the power on state,
		 * so make sure it really is powered off.
		 */
		if (genpd->power_off)
			genpd->power_off(genpd);
		return 0;
	}

	/* Power the domain up, mark this device resumed and restart it. */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	genpd_start_dev(genpd, dev);

	return genpd_resume_early(genpd, dev);
}
1051
1052/**
1053 * pm_genpd_complete - Complete power transition of a device in a power domain.
1054 * @dev: Device to complete the transition of.
1055 *
1056 * Complete a power transition of a device (during a system-wide power
1057 * transition) under the assumption that its pm_domain field points to the
1058 * domain member of an object of type struct generic_pm_domain representing
1059 * a power domain consisting of I/O devices.
1060 */
1061static void pm_genpd_complete(struct device *dev)
1062{
1063 struct generic_pm_domain *genpd;
1064 bool run_complete;
1065
1066 dev_dbg(dev, "%s()\n", __func__);
1067
1068 genpd = dev_to_genpd(dev);
1069 if (IS_ERR(genpd))
1070 return;
1071
1072 mutex_lock(&genpd->lock);
1073
1074 run_complete = !genpd->suspend_power_off;
1075 if (--genpd->prepared_count == 0)
1076 genpd->suspend_power_off = false;
1077
1078 mutex_unlock(&genpd->lock);
1079
1080 if (run_complete) {
1081 pm_generic_complete(dev);
6f00ff78 1082 pm_runtime_set_active(dev);
596ba34b 1083 pm_runtime_enable(dev);
6f00ff78 1084 pm_runtime_idle(dev);
596ba34b
RW
1085 }
1086}
1087
#else

/* System sleep support not configured: stub out all sleep callbacks. */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */
1103
f721889f 1104/**
b02c999a 1105 * __pm_genpd_add_device - Add a device to an I/O PM domain.
f721889f
RW
1106 * @genpd: PM domain to add the device to.
1107 * @dev: Device to be added.
b02c999a 1108 * @td: Set of PM QoS timing parameters to attach to the device.
f721889f 1109 */
b02c999a
RW
1110int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1111 struct gpd_timing_data *td)
f721889f 1112{
cd0ea672 1113 struct generic_pm_domain_data *gpd_data;
4605ab65 1114 struct pm_domain_data *pdd;
f721889f
RW
1115 int ret = 0;
1116
1117 dev_dbg(dev, "%s()\n", __func__);
1118
1119 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1120 return -EINVAL;
1121
17b75eca 1122 genpd_acquire_lock(genpd);
f721889f 1123
17b75eca 1124 if (genpd->status == GPD_STATE_POWER_OFF) {
f721889f
RW
1125 ret = -EINVAL;
1126 goto out;
1127 }
1128
596ba34b
RW
1129 if (genpd->prepared_count > 0) {
1130 ret = -EAGAIN;
1131 goto out;
1132 }
1133
4605ab65
RW
1134 list_for_each_entry(pdd, &genpd->dev_list, list_node)
1135 if (pdd->dev == dev) {
f721889f
RW
1136 ret = -EINVAL;
1137 goto out;
1138 }
1139
cd0ea672
RW
1140 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1141 if (!gpd_data) {
1142 ret = -ENOMEM;
1143 goto out;
1144 }
1145
596ba34b 1146 genpd->device_count++;
f721889f 1147
f721889f 1148 dev->pm_domain = &genpd->domain;
4605ab65 1149 dev_pm_get_subsys_data(dev);
cd0ea672
RW
1150 dev->power.subsys_data->domain_data = &gpd_data->base;
1151 gpd_data->base.dev = dev;
1152 gpd_data->need_restore = false;
1153 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
b02c999a
RW
1154 if (td)
1155 gpd_data->td = *td;
f721889f
RW
1156
1157 out:
17b75eca 1158 genpd_release_lock(genpd);
f721889f
RW
1159
1160 return ret;
1161}
1162
1163/**
1164 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1165 * @genpd: PM domain to remove the device from.
1166 * @dev: Device to be removed.
1167 */
1168int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1169 struct device *dev)
1170{
4605ab65 1171 struct pm_domain_data *pdd;
f721889f
RW
1172 int ret = -EINVAL;
1173
1174 dev_dbg(dev, "%s()\n", __func__);
1175
1176 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1177 return -EINVAL;
1178
17b75eca 1179 genpd_acquire_lock(genpd);
f721889f 1180
596ba34b
RW
1181 if (genpd->prepared_count > 0) {
1182 ret = -EAGAIN;
1183 goto out;
1184 }
1185
4605ab65
RW
1186 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
1187 if (pdd->dev != dev)
f721889f
RW
1188 continue;
1189
4605ab65
RW
1190 list_del_init(&pdd->list_node);
1191 pdd->dev = NULL;
1192 dev_pm_put_subsys_data(dev);
f721889f 1193 dev->pm_domain = NULL;
cd0ea672 1194 kfree(to_gpd_data(pdd));
f721889f 1195
596ba34b 1196 genpd->device_count--;
f721889f
RW
1197
1198 ret = 0;
1199 break;
1200 }
1201
596ba34b 1202 out:
17b75eca 1203 genpd_release_lock(genpd);
f721889f
RW
1204
1205 return ret;
1206}
1207
1208/**
1209 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1210 * @genpd: Master PM domain to add the subdomain to.
bc0403ff 1211 * @subdomain: Subdomain to be added.
f721889f
RW
1212 */
1213int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
bc0403ff 1214 struct generic_pm_domain *subdomain)
f721889f 1215{
5063ce15 1216 struct gpd_link *link;
f721889f
RW
1217 int ret = 0;
1218
bc0403ff 1219 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
f721889f
RW
1220 return -EINVAL;
1221
17b75eca
RW
1222 start:
1223 genpd_acquire_lock(genpd);
bc0403ff 1224 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
f721889f 1225
bc0403ff
RW
1226 if (subdomain->status != GPD_STATE_POWER_OFF
1227 && subdomain->status != GPD_STATE_ACTIVE) {
1228 mutex_unlock(&subdomain->lock);
17b75eca
RW
1229 genpd_release_lock(genpd);
1230 goto start;
1231 }
1232
1233 if (genpd->status == GPD_STATE_POWER_OFF
bc0403ff 1234 && subdomain->status != GPD_STATE_POWER_OFF) {
f721889f
RW
1235 ret = -EINVAL;
1236 goto out;
1237 }
1238
5063ce15 1239 list_for_each_entry(link, &genpd->slave_links, slave_node) {
bc0403ff 1240 if (link->slave == subdomain && link->master == genpd) {
f721889f
RW
1241 ret = -EINVAL;
1242 goto out;
1243 }
1244 }
1245
5063ce15
RW
1246 link = kzalloc(sizeof(*link), GFP_KERNEL);
1247 if (!link) {
1248 ret = -ENOMEM;
1249 goto out;
1250 }
1251 link->master = genpd;
1252 list_add_tail(&link->master_node, &genpd->master_links);
bc0403ff
RW
1253 link->slave = subdomain;
1254 list_add_tail(&link->slave_node, &subdomain->slave_links);
1255 if (subdomain->status != GPD_STATE_POWER_OFF)
c4bb3160 1256 genpd_sd_counter_inc(genpd);
f721889f 1257
f721889f 1258 out:
bc0403ff 1259 mutex_unlock(&subdomain->lock);
17b75eca 1260 genpd_release_lock(genpd);
f721889f
RW
1261
1262 return ret;
1263}
1264
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 *
 * Returns 0 on success and -EINVAL if the arguments are invalid or no link
 * between @genpd and @subdomain exists.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		/* Lock order: master first, then slave (nested annotation). */
		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			/* Transient state: release both locks and retry. */
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		/* A powered-on subdomain was pinning its master: unpin it. */
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}
1311
d5e4cbfe
RW
1312/**
1313 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1314 * @dev: Device to add the callbacks to.
1315 * @ops: Set of callbacks to add.
b02c999a 1316 * @td: Timing data to add to the device along with the callbacks (optional).
d5e4cbfe 1317 */
b02c999a
RW
1318int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1319 struct gpd_timing_data *td)
d5e4cbfe
RW
1320{
1321 struct pm_domain_data *pdd;
1322 int ret = 0;
1323
1324 if (!(dev && dev->power.subsys_data && ops))
1325 return -EINVAL;
1326
1327 pm_runtime_disable(dev);
1328 device_pm_lock();
1329
1330 pdd = dev->power.subsys_data->domain_data;
1331 if (pdd) {
1332 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1333
1334 gpd_data->ops = *ops;
b02c999a
RW
1335 if (td)
1336 gpd_data->td = *td;
d5e4cbfe
RW
1337 } else {
1338 ret = -EINVAL;
1339 }
1340
1341 device_pm_unlock();
1342 pm_runtime_enable(dev);
1343
1344 return ret;
1345}
1346EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1347
1348/**
b02c999a 1349 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
d5e4cbfe 1350 * @dev: Device to remove the callbacks from.
b02c999a 1351 * @clear_td: If set, clear the device's timing data too.
d5e4cbfe 1352 */
b02c999a 1353int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
d5e4cbfe
RW
1354{
1355 struct pm_domain_data *pdd;
1356 int ret = 0;
1357
1358 if (!(dev && dev->power.subsys_data))
1359 return -EINVAL;
1360
1361 pm_runtime_disable(dev);
1362 device_pm_lock();
1363
1364 pdd = dev->power.subsys_data->domain_data;
1365 if (pdd) {
1366 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1367
1368 gpd_data->ops = (struct gpd_dev_ops){ 0 };
b02c999a
RW
1369 if (clear_td)
1370 gpd_data->td = (struct gpd_timing_data){ 0 };
d5e4cbfe
RW
1371 } else {
1372 ret = -EINVAL;
1373 }
1374
1375 device_pm_unlock();
1376 pm_runtime_enable(dev);
1377
1378 return ret;
1379}
b02c999a 1380EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
d5e4cbfe 1381
/* Default device callbacks for generic PM domains. */

ecf00475
RW
1384/**
1385 * pm_genpd_default_save_state - Default "save device state" for PM domians.
1386 * @dev: Device to handle.
1387 */
1388static int pm_genpd_default_save_state(struct device *dev)
1389{
1390 int (*cb)(struct device *__dev);
1391 struct device_driver *drv = dev->driver;
1392
1393 cb = dev_gpd_data(dev)->ops.save_state;
1394 if (cb)
1395 return cb(dev);
1396
1397 if (drv && drv->pm && drv->pm->runtime_suspend)
1398 return drv->pm->runtime_suspend(dev);
1399
1400 return 0;
1401}
1402
1403/**
1404 * pm_genpd_default_restore_state - Default PM domians "restore device state".
1405 * @dev: Device to handle.
1406 */
1407static int pm_genpd_default_restore_state(struct device *dev)
1408{
1409 int (*cb)(struct device *__dev);
1410 struct device_driver *drv = dev->driver;
1411
1412 cb = dev_gpd_data(dev)->ops.restore_state;
1413 if (cb)
1414 return cb(dev);
1415
1416 if (drv && drv->pm && drv->pm->runtime_resume)
1417 return drv->pm->runtime_resume(dev);
1418
1419 return 0;
1420}
1421
d23b9b00
RW
1422/**
1423 * pm_genpd_default_suspend - Default "device suspend" for PM domians.
1424 * @dev: Device to handle.
1425 */
1426static int pm_genpd_default_suspend(struct device *dev)
1427{
1428 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1429
1430 return cb ? cb(dev) : pm_generic_suspend(dev);
1431}
1432
1433/**
1434 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
1435 * @dev: Device to handle.
1436 */
1437static int pm_genpd_default_suspend_late(struct device *dev)
1438{
1439 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1440
1441 return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
1442}
1443
1444/**
1445 * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
1446 * @dev: Device to handle.
1447 */
1448static int pm_genpd_default_resume_early(struct device *dev)
1449{
1450 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1451
1452 return cb ? cb(dev) : pm_generic_resume_noirq(dev);
1453}
1454
1455/**
1456 * pm_genpd_default_resume - Default "device resume" for PM domians.
1457 * @dev: Device to handle.
1458 */
1459static int pm_genpd_default_resume(struct device *dev)
1460{
1461 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1462
1463 return cb ? cb(dev) : pm_generic_resume(dev);
1464}
1465
1466/**
1467 * pm_genpd_default_freeze - Default "device freeze" for PM domians.
1468 * @dev: Device to handle.
1469 */
1470static int pm_genpd_default_freeze(struct device *dev)
1471{
1472 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1473
1474 return cb ? cb(dev) : pm_generic_freeze(dev);
1475}
1476
1477/**
1478 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
1479 * @dev: Device to handle.
1480 */
1481static int pm_genpd_default_freeze_late(struct device *dev)
1482{
1483 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1484
1485 return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
1486}
1487
1488/**
1489 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
1490 * @dev: Device to handle.
1491 */
1492static int pm_genpd_default_thaw_early(struct device *dev)
1493{
1494 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1495
1496 return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
1497}
1498
1499/**
1500 * pm_genpd_default_thaw - Default "device thaw" for PM domians.
1501 * @dev: Device to handle.
1502 */
1503static int pm_genpd_default_thaw(struct device *dev)
1504{
1505 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1506
1507 return cb ? cb(dev) : pm_generic_thaw(dev);
1508}
1509
f721889f
RW
1510/**
1511 * pm_genpd_init - Initialize a generic I/O PM domain object.
1512 * @genpd: PM domain object to initialize.
1513 * @gov: PM domain governor to associate with the domain (may be NULL).
1514 * @is_off: Initial value of the domain's power_is_off field.
1515 */
1516void pm_genpd_init(struct generic_pm_domain *genpd,
1517 struct dev_power_governor *gov, bool is_off)
1518{
1519 if (IS_ERR_OR_NULL(genpd))
1520 return;
1521
5063ce15
RW
1522 INIT_LIST_HEAD(&genpd->master_links);
1523 INIT_LIST_HEAD(&genpd->slave_links);
f721889f 1524 INIT_LIST_HEAD(&genpd->dev_list);
f721889f
RW
1525 mutex_init(&genpd->lock);
1526 genpd->gov = gov;
1527 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1528 genpd->in_progress = 0;
c4bb3160 1529 atomic_set(&genpd->sd_count, 0);
17b75eca
RW
1530 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1531 init_waitqueue_head(&genpd->status_wait_queue);
c6d22b37
RW
1532 genpd->poweroff_task = NULL;
1533 genpd->resume_count = 0;
596ba34b
RW
1534 genpd->device_count = 0;
1535 genpd->suspended_count = 0;
221e9b58 1536 genpd->max_off_time_ns = -1;
f721889f
RW
1537 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1538 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1539 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
596ba34b
RW
1540 genpd->domain.ops.prepare = pm_genpd_prepare;
1541 genpd->domain.ops.suspend = pm_genpd_suspend;
1542 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1543 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1544 genpd->domain.ops.resume = pm_genpd_resume;
1545 genpd->domain.ops.freeze = pm_genpd_freeze;
1546 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1547 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1548 genpd->domain.ops.thaw = pm_genpd_thaw;
d23b9b00
RW
1549 genpd->domain.ops.poweroff = pm_genpd_suspend;
1550 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
596ba34b 1551 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
d23b9b00 1552 genpd->domain.ops.restore = pm_genpd_resume;
596ba34b 1553 genpd->domain.ops.complete = pm_genpd_complete;
ecf00475
RW
1554 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1555 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
d23b9b00
RW
1556 genpd->dev_ops.freeze = pm_genpd_default_suspend;
1557 genpd->dev_ops.freeze_late = pm_genpd_default_suspend_late;
1558 genpd->dev_ops.thaw_early = pm_genpd_default_resume_early;
1559 genpd->dev_ops.thaw = pm_genpd_default_resume;
1560 genpd->dev_ops.freeze = pm_genpd_default_freeze;
1561 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
1562 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
1563 genpd->dev_ops.thaw = pm_genpd_default_thaw;
5125bbf3
RW
1564 mutex_lock(&gpd_list_lock);
1565 list_add(&genpd->gpd_list_node, &gpd_list);
1566 mutex_unlock(&gpd_list_lock);
1567}
This page took 0.115066 seconds and 5 git commands to generate.