ARM: OMAP4: PM: Add L2X0 cache lowpower support
[deliverable/linux.git] / arch / arm / mach-omap2 / pm44xx.c
CommitLineData
5643aebb
RN
1/*
2 * OMAP4 Power Management Routines
3 *
e44f9a77 4 * Copyright (C) 2010-2011 Texas Instruments, Inc.
5643aebb 5 * Rajendra Nayak <rnayak@ti.com>
e44f9a77 6 * Santosh Shilimkar <santosh.shilimkar@ti.com>
5643aebb
RN
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/pm.h>
14#include <linux/suspend.h>
15#include <linux/module.h>
16#include <linux/list.h>
17#include <linux/err.h>
18#include <linux/slab.h>
19
4e65331c 20#include "common.h"
3c50729b 21#include "clockdomain.h"
72e06d08 22#include "powerdomain.h"
e44f9a77 23#include "pm.h"
5643aebb
RN
24
/*
 * Per-powerdomain bookkeeping entry.  One is allocated per powerdomain
 * of interest in pwrdms_setup() and linked on pwrst_list.
 */
struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;		/* target low-power state used during suspend */
#ifdef CONFIG_SUSPEND
	u32 saved_state;	/* next-state saved on entry, restored on exit */
#endif
	struct list_head node;	/* link on pwrst_list */
};

/* List of all tracked powerdomains, populated at init time. */
static LIST_HEAD(pwrst_list);
35
36#ifdef CONFIG_SUSPEND
5643aebb
RN
/**
 * omap4_pm_suspend - OMAP4 system-wide suspend handler
 *
 * Saves the currently programmed next power state of every powerdomain
 * on pwrst_list, programs the suspend target states, enters MPUSS low
 * power on the running (master) CPU, then restores the saved states.
 *
 * Always returns 0 so the suspend core proceeds; a powerdomain missing
 * its target state is only logged.
 */
static int omap4_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;
	u32 cpu_id = smp_processor_id();

	/* Save current powerdomain state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	}

	/* Set targeted power domain states by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}

	/*
	 * For MPUSS to hit power domain retention(CSWR or OSWR),
	 * CPU0 and CPU1 power domains need to be in OFF or DORMANT state,
	 * since CPU power domain CSWR is not supported by hardware
	 * Only master CPU follows suspend path. All other CPUs follow
	 * CPU hotplug path in system wide suspend. On OMAP4, CPU power
	 * domain CSWR is not supported by hardware.
	 * More details can be found in OMAP4430 TRM section 4.3.4.2.
	 */
	omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);

	/* Restore next powerdomain state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		/* Compare the previously reached state against the target */
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter "
				"target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		pr_crit("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return 0;
}
82
83static int omap4_pm_enter(suspend_state_t suspend_state)
84{
85 int ret = 0;
86
87 switch (suspend_state) {
88 case PM_SUSPEND_STANDBY:
89 case PM_SUSPEND_MEM:
90 ret = omap4_pm_suspend();
91 break;
92 default:
93 ret = -EINVAL;
94 }
95
96 return ret;
97}
98
5643aebb
RN
/**
 * omap4_pm_begin - platform_suspend_ops .begin hook
 * @state: requested suspend state (unused here)
 *
 * Disables the cpuidle "hlt" path so the idle loop cannot enter
 * low-power states while a system suspend is in progress.
 */
static int omap4_pm_begin(suspend_state_t state)
{
	disable_hlt();
	return 0;
}
104
/**
 * omap4_pm_end - platform_suspend_ops .end hook
 *
 * Re-enables the idle "hlt" path disabled in omap4_pm_begin().
 */
static void omap4_pm_end(void)
{
	enable_hlt();
	return;
}
110
/* Suspend callbacks registered with the kernel suspend core. */
static const struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap4_pm_begin,
	.end		= omap4_pm_end,
	.enter		= omap4_pm_enter,
	.valid		= suspend_valid_only_mem,
};
117#endif /* CONFIG_SUSPEND */
118
3c50729b
SS
119/*
120 * Enable hardware supervised mode for all clockdomains if it's
121 * supported. Initiate sleep transition for other clockdomains, if
122 * they are not used
123 */
124static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
125{
126 if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
127 clkdm_allow_idle(clkdm);
128 else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
129 atomic_read(&clkdm->usecount) == 0)
130 clkdm_sleep(clkdm);
131 return 0;
132}
133
134
5643aebb
RN
135static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
136{
137 struct power_state *pwrst;
138
139 if (!pwrdm->pwrsts)
140 return 0;
141
e44f9a77
SS
142 /*
143 * Skip CPU0 and CPU1 power domains. CPU1 is programmed
144 * through hotplug path and CPU0 explicitly programmed
145 * further down in the code path
146 */
147 if (!strncmp(pwrdm->name, "cpu", 3))
148 return 0;
149
150 /*
151 * FIXME: Remove this check when core retention is supported
152 * Only MPUSS power domain is added in the list.
153 */
154 if (strcmp(pwrdm->name, "mpu_pwrdm"))
155 return 0;
156
5643aebb
RN
157 pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
158 if (!pwrst)
159 return -ENOMEM;
e44f9a77 160
5643aebb 161 pwrst->pwrdm = pwrdm;
e44f9a77 162 pwrst->next_state = PWRDM_POWER_RET;
5643aebb
RN
163 list_add(&pwrst->node, &pwrst_list);
164
e44f9a77 165 return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
5643aebb
RN
166}
167
72826b9f
SS
/**
 * omap_default_idle - OMAP4 default idle routine.
 *
 * Implements OMAP4 memory, IO ordering requirements which can't be addressed
 * with default arch_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and
 * by secondary CPU with CONFIG_CPUIDLE.
 */
static void omap_default_idle(void)
{
	/* Mask both IRQ and FIQ before entering WFI */
	local_irq_disable();
	local_fiq_disable();

	omap_do_wfi();

	/* Unmask in reverse order after wakeup */
	local_fiq_enable();
	local_irq_enable();
}
185
5643aebb
RN
186/**
187 * omap4_pm_init - Init routine for OMAP4 PM
188 *
189 * Initializes all powerdomain and clockdomain target states
190 * and all PRCM settings.
191 */
192static int __init omap4_pm_init(void)
193{
194 int ret;
12f27826
SS
195 struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
196 struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
5643aebb
RN
197
198 if (!cpu_is_omap44xx())
199 return -ENODEV;
200
361b02f3
SS
201 if (omap_rev() == OMAP4430_REV_ES1_0) {
202 WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
203 return -ENODEV;
204 }
205
5643aebb
RN
206 pr_err("Power Management for TI OMAP4.\n");
207
5643aebb
RN
208 ret = pwrdm_for_each(pwrdms_setup, NULL);
209 if (ret) {
210 pr_err("Failed to setup powerdomains\n");
211 goto err2;
212 }
5643aebb 213
12f27826
SS
214 /*
215 * The dynamic dependency between MPUSS -> MEMIF and
216 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
217 * expected. The hardware recommendation is to enable static
218 * dependencies for these to avoid system lock ups or random crashes.
219 */
220 mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
221 emif_clkdm = clkdm_lookup("l3_emif_clkdm");
222 l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
223 l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
224 l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
225 ducati_clkdm = clkdm_lookup("ducati_clkdm");
226 if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) ||
227 (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm))
228 goto err2;
229
230 ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
231 ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
232 ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
233 ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
234 ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
235 ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
236 if (ret) {
237 pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 "
238 "wakeup dependency\n");
239 goto err2;
240 }
241
b2b9762f
SS
242 ret = omap4_mpuss_init();
243 if (ret) {
244 pr_err("Failed to initialise OMAP4 MPUSS\n");
245 goto err2;
246 }
247
3c50729b
SS
248 (void) clkdm_for_each(clkdms_setup, NULL);
249
5643aebb
RN
250#ifdef CONFIG_SUSPEND
251 suspend_set_ops(&omap_pm_ops);
252#endif /* CONFIG_SUSPEND */
253
72826b9f
SS
254 /* Overwrite the default arch_idle() */
255 pm_idle = omap_default_idle;
256
5643aebb
RN
257err2:
258 return ret;
259}
260late_initcall(omap4_pm_init);
This page took 0.116026 seconds and 5 git commands to generate.