/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

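/*
 * GENPD_DEV_CALLBACK() invokes an optional per-device callback of the given
 * return type from the domain's dev_ops table, yielding (type)0 when the
 * callback is not set.  GENPD_DEV_TIMED_CALLBACK() additionally times the
 * call and records a new worst-case latency in the device's timing data for
 * the governor to consume.  As a rough sketch, an invocation such as
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) boils down to (illustrative
 * expansion only, not literal preprocessor output):
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = __routine ? __routine(dev) : 0;
 */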
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpuidle_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpuidle_data->saved_exit_latency;
	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}

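/*
 * Invoke the domain's .power_on() callback, if one is set, and record a new
 * worst-case power-on latency when the measured time exceeds the previous
 * maximum, notifying the governor via max_off_time_changed.
 */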
static int genpd_power_on(struct generic_pm_domain *genpd)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	genpd_recalc_cpu_exit_latency(genpd);
	pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
		genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
		genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpuidle_data) {
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);
		/*
		 * The "wait for master" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd);
	if (ret)
		goto err;

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

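/*
 * Illustrative sketch, not taken from a real driver ("a3rv" is a made-up
 * domain name): platform code that must have a registered domain powered
 * before touching its devices could do
 *
 *	ret = pm_genpd_name_poweron("a3rv");
 *	if (ret)
 *		pr_err("cannot power on domain: %d\n", ret);
 */
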
static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

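/*
 * PM QoS notifier for devices in a generic PM domain: flag the device's
 * timing constraints as changed and set max_off_time_changed for its domain,
 * then walk up the parent chain (stopping at parents that ignore their
 * children) doing the same, so that governors recompute the maximum allowed
 * "off" time on their next evaluation.
 */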
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore > 0)
		return 0;

	/*
	 * If the value of the need_restore flag is still unknown at this point,
	 * we trust that pm_genpd_poweroff() has verified that the device is
	 * already runtime PM suspended.
	 */
	if (gpd_data->need_restore < 0) {
		gpd_data->need_restore = 1;
		return 0;
	}

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = 1;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int need_restore = gpd_data->need_restore;

	gpd_data->need_restore = 0;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);

	/*
	 * Call genpd_restore_dev() for recently added devices too (need_restore
	 * is negative then).
	 */
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpuidle_data) {
		/*
		 * If cpuidle_data is set, cpuidle should turn the domain off
		 * when the CPU in it is idle.  In that case we don't decrement
		 * the subdomain counts of the master domains, so that power is
		 * not removed from the current domain prematurely as a result
		 * of cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);

	/*
	 * If we have an unknown state of the need_restore flag, it means none
	 * of the runtime PM callbacks has been invoked yet.  Let's update the
	 * flag to reflect that the current state is active.
	 */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->need_restore < 0)
		gpd_data->need_restore = 0;

	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}

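/*
 * Passing "pd_ignore_unused" on the kernel command line makes the late
 * initcall below skip powering off otherwise unused domains, which can be
 * handy when debugging a platform's power domain setup, e.g. (illustrative
 * bootloader argument):
 *
 *	... root=/dev/mmcblk0p2 pd_ignore_unused
 */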
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

static int __init genpd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(genpd_poweroff_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Complete the freeze of a device under the assumption that its pm_domain
 * field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might have put the domain into an arbitrary
		 * state, so make it appear to pm_genpd_sync_poweron() as
		 * powered off, so that it tries to power it on in case it was
		 * really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the device's PM domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

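/*
 * Sketch of intended usage (illustrative only; my_timer_suspend() and
 * my_timer_resume() are hypothetical, not taken from a real driver): a
 * timekeeping device sitting in a PM domain would bracket its syscore
 * suspend/resume handlers with these calls so the domain's state tracking
 * matches the hardware state:
 *
 *	static int my_timer_suspend(struct device *dev)
 *	{
 *		... save hardware context ...
 *		pm_genpd_syscore_poweroff(dev);
 *		return 0;
 *	}
 *
 *	static int my_timer_resume(struct device *dev)
 *	{
 *		pm_genpd_syscore_poweron(dev);
 *		... restore hardware context ...
 *		return 0;
 *	}
 */
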
#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->need_restore = -1;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;
	dev->pm_domain = &genpd->domain;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_release_lock(genpd);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

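/*
 * Illustrative sketch (my_pd and pdev are hypothetical; passing a NULL @td
 * leaves the device with zeroed timing data until measurements accumulate):
 *
 *	ret = __pm_genpd_add_device(&my_pd, &pdev->dev, NULL);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to add to PM domain: %d\n", ret);
 */
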
/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    ||  IS_ERR_OR_NULL(dev->pm_domain)
	    ||  pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	genpd_release_lock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

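/*
 * Example sketch (a4s_pd and a3sp_pd are made-up domain objects): making one
 * domain a subdomain of another keeps the master powered whenever the
 * subdomain is active:
 *
 *	ret = pm_genpd_add_subdomain(&a4s_pd, &a3sp_pd);
 *	if (ret)
 *		pr_err("cannot link PM domains: %d\n", ret);
 */
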
/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpuidle_data) {
		ret = -EEXIST;
		goto out;
	}
	cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
	if (!cpuidle_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_drv;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpuidle_data->idle_state = idle_state;
	cpuidle_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpuidle_data = cpuidle_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();

 err_drv:
	kfree(cpuidle_data);
	goto out;
}

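/*
 * Example sketch (cpu_pd is a made-up domain; state index 1 is assumed to be
 * a suitable, initially disabled deep idle state of the running cpuidle
 * driver):
 *
 *	ret = pm_genpd_attach_cpuidle(&cpu_pd, 1);
 */
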
/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpuidle_data = genpd->cpuidle_data;
	if (!cpuidle_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpuidle_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpuidle_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpuidle_data = NULL;
	kfree(cpuidle_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}

/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial power state of the domain (true means powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}

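/*
 * Minimal initialization sketch (my_pd, my_pd_power_on() and
 * my_pd_power_off() are hypothetical platform code, not part of this file):
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my-pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, true);	<- registered powered off
 *
 * Passing a NULL governor selects no runtime power-down policy beyond the
 * default checks performed by pm_genpd_poweroff() itself.
 */
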
1916 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1917 /*
1918 * Device Tree based PM domain providers.
1919 *
1920 * The code below implements generic device tree based PM domain providers that
1921 * bind device tree nodes with generic PM domains registered in the system.
1922 *
1923 * Any driver that registers generic PM domains and needs to support binding of
1924 * devices to these domains is supposed to register a PM domain provider, which
1925 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1926 *
1927 * Two simple mapping functions have been provided for convenience:
1928 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1929 * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1930 * index.
1931 */
1932
1933 /**
1934 * struct of_genpd_provider - PM domain provider registration structure
1935 * @link: Entry in global list of PM domain providers
1936 * @node: Pointer to device tree node of PM domain provider
1937 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1938 * into a PM domain.
1939 * @data: context pointer to be passed into @xlate callback
1940 */
1941 struct of_genpd_provider {
1942 struct list_head link;
1943 struct device_node *node;
1944 genpd_xlate_t xlate;
1945 void *data;
1946 };
1947
1948 /* List of registered PM domain providers. */
1949 static LIST_HEAD(of_genpd_providers);
1950 /* Mutex to protect the list above. */
1951 static DEFINE_MUTEX(of_genpd_mutex);
1952
1953 /**
1954 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1955 * @genpdspec: OF phandle args to map into a PM domain
1956 * @data: xlate function private data - pointer to struct generic_pm_domain
1957 *
1958 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The xlate function's private data must be
 * a valid pointer to a struct generic_pm_domain.
1961 */
1962 struct generic_pm_domain *__of_genpd_xlate_simple(
1963 struct of_phandle_args *genpdspec,
1964 void *data)
1965 {
1966 if (genpdspec->args_count != 0)
1967 return ERR_PTR(-EINVAL);
1968 return data;
1969 }
1970 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
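/*
 * Registration sketch for a single-domain provider whose node uses
 * #power-domain-cells = <0> (my_pd is the hypothetical domain from the
 * pm_genpd_init() example above):
 *
 *	ret = __of_genpd_add_provider(pdev->dev.of_node,
 *				      __of_genpd_xlate_simple, &my_pd);
 */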
1971
1972 /**
1973 * __of_genpd_xlate_onecell() - Xlate function using a single index.
1974 * @genpdspec: OF phandle args to map into a PM domain
1975 * @data: xlate function private data - pointer to struct genpd_onecell_data
1976 *
1977 * This is a generic xlate function that can be used to model simple PM domain
1978 * controllers that have one device tree node and provide multiple PM domains.
1979 * A single cell is used as an index into an array of PM domains specified in
1980 * the genpd_onecell_data struct when registering the provider.
1981 */
1982 struct generic_pm_domain *__of_genpd_xlate_onecell(
1983 struct of_phandle_args *genpdspec,
1984 void *data)
1985 {
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx;

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	idx = genpdspec->args[0];

1992 if (idx >= genpd_data->num_domains) {
1993 pr_err("%s: invalid domain index %u\n", __func__, idx);
1994 return ERR_PTR(-EINVAL);
1995 }
1996
1997 if (!genpd_data->domains[idx])
1998 return ERR_PTR(-ENOENT);
1999
2000 return genpd_data->domains[idx];
2001 }
2002 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
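/*
 * Registration sketch for a multi-domain provider (hypothetical names):
 * the single specifier cell indexes the domains array, so a consumer
 * using "power-domains = <&power 1>" resolves to my_domains[1]. A NULL
 * slot yields -ENOENT, which allows sparsely populated maps:
 *
 *	static struct generic_pm_domain *my_domains[] = { &pd_a, &pd_b };
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = __of_genpd_add_provider(np, __of_genpd_xlate_onecell,
 *				      &my_onecell_data);
 */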
2003
2004 /**
2005 * __of_genpd_add_provider() - Register a PM domain provider for a node
2006 * @np: Device node pointer associated with the PM domain provider.
2007 * @xlate: Callback for decoding PM domain from phandle arguments.
2008 * @data: Context pointer for @xlate callback.
2009 */
2010 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2011 void *data)
2012 {
2013 struct of_genpd_provider *cp;
2014
2015 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2016 if (!cp)
2017 return -ENOMEM;
2018
2019 cp->node = of_node_get(np);
2020 cp->data = data;
2021 cp->xlate = xlate;
2022
2023 mutex_lock(&of_genpd_mutex);
2024 list_add(&cp->link, &of_genpd_providers);
2025 mutex_unlock(&of_genpd_mutex);
2026 pr_debug("Added domain provider from %s\n", np->full_name);
2027
2028 return 0;
2029 }
2030 EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
2031
2032 /**
2033 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2034 * @np: Device node pointer associated with the PM domain provider
2035 */
2036 void of_genpd_del_provider(struct device_node *np)
2037 {
2038 struct of_genpd_provider *cp;
2039
2040 mutex_lock(&of_genpd_mutex);
2041 list_for_each_entry(cp, &of_genpd_providers, link) {
2042 if (cp->node == np) {
2043 list_del(&cp->link);
2044 of_node_put(cp->node);
2045 kfree(cp);
2046 break;
2047 }
2048 }
2049 mutex_unlock(&of_genpd_mutex);
2050 }
2051 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
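/*
 * A provider is expected to unregister itself when it goes away, so a
 * driver would normally pair the two calls across its probe and remove
 * paths (sketch, hypothetical driver):
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		of_genpd_del_provider(pdev->dev.of_node);
 *		return 0;
 *	}
 */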
2052
2053 /**
2054 * of_genpd_get_from_provider() - Look-up PM domain
2055 * @genpdspec: OF phandle args to use for look-up
2056 *
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if one is found, uses the provider's xlate function to map the phandle
 * arguments to a PM domain.
2060 *
2061 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2062 * on failure.
2063 */
2064 struct generic_pm_domain *of_genpd_get_from_provider(
2065 struct of_phandle_args *genpdspec)
2066 {
2067 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2068 struct of_genpd_provider *provider;
2069
2070 mutex_lock(&of_genpd_mutex);
2071
	/* Check if we have such a provider in our list */
2073 list_for_each_entry(provider, &of_genpd_providers, link) {
2074 if (provider->node == genpdspec->np)
2075 genpd = provider->xlate(genpdspec, provider->data);
2076 if (!IS_ERR(genpd))
2077 break;
2078 }
2079
2080 mutex_unlock(&of_genpd_mutex);
2081
2082 return genpd;
2083 }
2084 EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
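/*
 * Callers normally obtain the phandle arguments from the consumer node
 * first and drop the node reference once the look-up is done (sketch):
 *
 *	struct of_phandle_args pd_args;
 *	struct generic_pm_domain *pd;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!ret) {
 *		pd = of_genpd_get_from_provider(&pd_args);
 *		of_node_put(pd_args.np);
 *	}
 *
 * genpd_dev_pm_attach() below does exactly this on behalf of a device.
 */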
2085
2086 /**
2087 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
2089 * @power_off: Currently not used
2090 *
 * Try to locate the generic PM domain that the device was previously attached
 * to. If one is found, detach the device from it.
2093 */
2094 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2095 {
2096 struct generic_pm_domain *pd = NULL, *gpd;
2097 int ret = 0;
2098
2099 if (!dev->pm_domain)
2100 return;
2101
2102 mutex_lock(&gpd_list_lock);
2103 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2104 if (&gpd->domain == dev->pm_domain) {
2105 pd = gpd;
2106 break;
2107 }
2108 }
2109 mutex_unlock(&gpd_list_lock);
2110
2111 if (!pd)
2112 return;
2113
2114 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2115
2116 while (1) {
2117 ret = pm_genpd_remove_device(pd, dev);
2118 if (ret != -EAGAIN)
2119 break;
2120 cond_resched();
2121 }
2122
	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}
2128
2129 /* Check if PM domain can be powered off after removing this device. */
2130 genpd_queue_power_off_work(pd);
2131 }
2132
2133 /**
2134 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2135 * @dev: Device to attach.
2136 *
 * Parse the device's OF node to find a PM domain specifier. If one is found,
 * attach the device to the PM domain retrieved from the provider.
2139 *
2140 * Both generic and legacy Samsung-specific DT bindings are supported to keep
2141 * backwards compatibility with existing DTBs.
2142 *
 * Returns 0 on a successful attach, or a negative error code otherwise.
2144 */
2145 int genpd_dev_pm_attach(struct device *dev)
2146 {
2147 struct of_phandle_args pd_args;
2148 struct generic_pm_domain *pd;
2149 int ret;
2150
2151 if (!dev->of_node)
2152 return -ENODEV;
2153
2154 if (dev->pm_domain)
2155 return -EEXIST;
2156
2157 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2158 "#power-domain-cells", 0, &pd_args);
2159 if (ret < 0) {
2160 if (ret != -ENOENT)
2161 return ret;
2162
2163 /*
2164 * Try legacy Samsung-specific bindings
2165 * (for backwards compatibility of DT ABI)
2166 */
2167 pd_args.args_count = 0;
2168 pd_args.np = of_parse_phandle(dev->of_node,
2169 "samsung,power-domain", 0);
2170 if (!pd_args.np)
2171 return -ENOENT;
2172 }
2173
	pd = of_genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return PTR_ERR(pd);
	}
2181
2182 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2183
2184 while (1) {
2185 ret = pm_genpd_add_device(pd, dev);
2186 if (ret != -EAGAIN)
2187 break;
2188 cond_resched();
2189 }
2190
	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d\n",
			pd->name, ret);
		return ret;
	}
2197
2198 dev->pm_domain->detach = genpd_dev_pm_detach;
2199 pm_genpd_poweron(pd);
2200
2201 return 0;
2202 }
2203 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
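/*
 * Drivers do not normally call this themselves; bus code (for example
 * dev_pm_domain_attach(), used by the platform bus before probing a
 * device) is the expected caller, roughly (sketch):
 *
 *	ret = genpd_dev_pm_attach(dev);
 *	// -ENODEV: the device has no OF node,
 *	// -ENOENT: no "power-domains" specifier in it;
 *	// either way the device simply has no generic PM domain.
 */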
2204 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2205
2206
2207 /*** debugfs support ***/
2208
2209 #ifdef CONFIG_PM_ADVANCED_DEBUG
2210 #include <linux/pm.h>
2211 #include <linux/device.h>
2212 #include <linux/debugfs.h>
2213 #include <linux/seq_file.h>
2214 #include <linux/init.h>
2215 #include <linux/kobject.h>
2216 static struct dentry *pm_genpd_debugfs_dir;
2217
2218 /*
2219 * TODO: This function is a slightly modified version of rtpm_status_show
2220 * from sysfs.c, so generalize it.
2221 */
2222 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2223 {
2224 static const char * const status_lookup[] = {
2225 [RPM_ACTIVE] = "active",
2226 [RPM_RESUMING] = "resuming",
2227 [RPM_SUSPENDED] = "suspended",
2228 [RPM_SUSPENDING] = "suspending"
2229 };
2230 const char *p = "";
2231
2232 if (dev->power.runtime_error)
2233 p = "error";
2234 else if (dev->power.disable_depth)
2235 p = "unsupported";
2236 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2237 p = status_lookup[dev->power.runtime_status];
2238 else
2239 WARN_ON(1);
2240
2241 seq_puts(s, p);
2242 }
2243
2244 static int pm_genpd_summary_one(struct seq_file *s,
2245 struct generic_pm_domain *genpd)
2246 {
2247 static const char * const status_lookup[] = {
2248 [GPD_STATE_ACTIVE] = "on",
2249 [GPD_STATE_WAIT_MASTER] = "wait-master",
2250 [GPD_STATE_BUSY] = "busy",
2251 [GPD_STATE_REPEAT] = "off-in-progress",
2252 [GPD_STATE_POWER_OFF] = "off"
2253 };
2254 struct pm_domain_data *pm_data;
2255 const char *kobj_path;
2256 struct gpd_link *link;
2257 int ret;
2258
2259 ret = mutex_lock_interruptible(&genpd->lock);
2260 if (ret)
2261 return -ERESTARTSYS;
2262
2263 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2264 goto exit;
2265 seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
2266
2267 /*
2268 * Modifications on the list require holding locks on both
2269 * master and slave, so we are safe.
2270 * Also genpd->name is immutable.
2271 */
2272 list_for_each_entry(link, &genpd->master_links, master_node) {
2273 seq_printf(s, "%s", link->slave->name);
2274 if (!list_is_last(&link->master_node, &genpd->master_links))
2275 seq_puts(s, ", ");
2276 }
2277
2278 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2279 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2280 if (kobj_path == NULL)
2281 continue;
2282
2283 seq_printf(s, "\n %-50s ", kobj_path);
2284 rtpm_status_str(s, pm_data->dev);
2285 kfree(kobj_path);
2286 }
2287
2288 seq_puts(s, "\n");
2289 exit:
2290 mutex_unlock(&genpd->lock);
2291
2292 return 0;
2293 }
2294
2295 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2296 {
2297 struct generic_pm_domain *genpd;
2298 int ret = 0;
2299
2300 seq_puts(s, " domain status slaves\n");
2301 seq_puts(s, " /device runtime status\n");
2302 seq_puts(s, "----------------------------------------------------------------------\n");
2303
2304 ret = mutex_lock_interruptible(&gpd_list_lock);
2305 if (ret)
2306 return -ERESTARTSYS;
2307
2308 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2309 ret = pm_genpd_summary_one(s, genpd);
2310 if (ret)
2311 break;
2312 }
2313 mutex_unlock(&gpd_list_lock);
2314
2315 return ret;
2316 }
2317
2318 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2319 {
2320 return single_open(file, pm_genpd_summary_show, NULL);
2321 }
2322
2323 static const struct file_operations pm_genpd_summary_fops = {
2324 .open = pm_genpd_summary_open,
2325 .read = seq_read,
2326 .llseek = seq_lseek,
2327 .release = single_release,
2328 };
2329
2330 static int __init pm_genpd_debug_init(void)
2331 {
2332 struct dentry *d;
2333
2334 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2335
2336 if (!pm_genpd_debugfs_dir)
2337 return -ENOMEM;
2338
2339 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2340 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2341 if (!d)
2342 return -ENOMEM;
2343
2344 return 0;
2345 }
2346 late_initcall(pm_genpd_debug_init);
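/*
 * With CONFIG_PM_ADVANCED_DEBUG set and debugfs mounted in the usual
 * place, the table produced above can be read from user space (sketch):
 *
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	    domain                      status         slaves
 *	           /device                                      runtime status
 *	----------------------------------------------------------------------
 *
 * Each domain line is followed by one indented line per member device
 * with its runtime PM status.
 */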
2347
2348 static void __exit pm_genpd_debug_exit(void)
2349 {
2350 debugfs_remove_recursive(pm_genpd_debugfs_dir);
2351 }
2352 __exitcall(pm_genpd_debug_exit);
2353 #endif /* CONFIG_PM_ADVANCED_DEBUG */