ARM: OMAP3: define statically the omap3_idle_data
arch/arm/mach-omap2/cpuidle34xx.c
/*
 * linux/arch/arm/mach-omap2/cpuidle34xx.c
 *
 * OMAP3 CPU IDLE Routines
 *
 * Copyright (C) 2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/cpu_pm.h>

#include <plat/prcm.h>
#include <plat/irqs.h>
#include "powerdomain.h"
#include "clockdomain.h"

#include "pm.h"
#include "control.h"
#include "common.h"

#ifdef CONFIG_CPU_IDLE

/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
        u32 mpu_state;
        u32 core_state;
};

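/*
 * Per C-state MPU and CORE powerdomain target states.  Entry N pairs
 * with omap3_idle_driver.states[N] (C1..C7 below).
 */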
struct omap3_idle_statedata omap3_idle_data[] = {
        {
                .mpu_state = PWRDM_POWER_ON,
                .core_state = PWRDM_POWER_ON,
        },
        {
                .mpu_state = PWRDM_POWER_ON,
                .core_state = PWRDM_POWER_ON,
        },
        {
                .mpu_state = PWRDM_POWER_RET,
                .core_state = PWRDM_POWER_ON,
        },
        {
                .mpu_state = PWRDM_POWER_OFF,
                .core_state = PWRDM_POWER_ON,
        },
        {
                .mpu_state = PWRDM_POWER_RET,
                .core_state = PWRDM_POWER_RET,
        },
        {
                .mpu_state = PWRDM_POWER_OFF,
                .core_state = PWRDM_POWER_RET,
        },
        {
                .mpu_state = PWRDM_POWER_OFF,
                .core_state = PWRDM_POWER_OFF,
        },
};

struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;

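/*
 * Clockdomain callbacks for pwrdm_for_each_clkdm(), used to disable and
 * re-enable hardware-supervised clockdomain idle transitions around C1.
 */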
static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
                               struct clockdomain *clkdm)
{
        clkdm_allow_idle(clkdm);
        return 0;
}

static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
                              struct clockdomain *clkdm)
{
        clkdm_deny_idle(clkdm);
        return 0;
}

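/*
 * __omap3_enter_idle - program the powerdomains and execute WFI
 *
 * Programs the next power state of the MPU and CORE powerdomains from the
 * per-state data, runs the CPU PM notifiers around MPU OFF so that VFP
 * context is saved and restored, and idles the CPU via omap_sram_idle().
 */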
static int __omap3_enter_idle(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv,
                              int index)
{
        struct omap3_idle_statedata *cx =
                        cpuidle_get_statedata(&dev->states_usage[index]);
        u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

        local_fiq_disable();

        pwrdm_set_next_pwrst(mpu_pd, mpu_state);
        pwrdm_set_next_pwrst(core_pd, core_state);

        if (omap_irq_pending() || need_resched())
                goto return_sleep_time;

        /* Deny idle for C1 */
        if (index == 0) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
        }

        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP context is saved.
         */
        if (mpu_state == PWRDM_POWER_OFF)
                cpu_pm_enter();

        /* Execute ARM wfi */
        omap_sram_idle();

        /*
         * Call idle CPU PM exit notifier chain to restore
         * VFP context.
         */
        if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
                cpu_pm_exit();

        /* Re-allow idle for C1 */
        if (index == 0) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
        }

return_sleep_time:

        local_fiq_enable();

        return index;
}

/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static inline int omap3_enter_idle(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv,
                                   int index)
{
        return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
}

/**
 * next_valid_state - Find next valid C-state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: Index of currently selected c-state
 *
 * If the state corresponding to index is valid, index is returned
 * to the caller. Else, this function searches for a lower c-state which
 * is still valid (as defined in omap3_idle_data[]) and returns its index.
 *
 * A state is valid if its MPU and CORE power states are no deeper than
 * what the current enable_off_mode setting allows.
 */
static int next_valid_state(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv,
                            int index)
{
        struct cpuidle_state_usage *curr_usage = &dev->states_usage[index];
        struct cpuidle_state *curr = &drv->states[index];
        struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage);
        u32 mpu_deepest_state = PWRDM_POWER_RET;
        u32 core_deepest_state = PWRDM_POWER_RET;
        int next_index = -1;

        if (enable_off_mode) {
                mpu_deepest_state = PWRDM_POWER_OFF;
                /*
                 * Erratum i583: applicable for ES rev < ES1.2 on 3630.
                 * CORE OFF mode is not supported in a stable form, restrict
                 * instead the CORE state to RET.
                 */
                if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
                        core_deepest_state = PWRDM_POWER_OFF;
        }

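        /*
         * Power states compare numerically (OFF < RET < ON), so a C-state
         * is usable only if both its MPU and CORE targets are no deeper
         * than the deepest allowed states computed above.  For example,
         * with off-mode disabled a request for C7 (MPU OFF + CORE OFF)
         * falls back to C5 (MPU RET + CORE RET).
         */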
        /* Check if current state is valid */
        if ((cx->mpu_state >= mpu_deepest_state) &&
            (cx->core_state >= core_deepest_state)) {
                return index;
        } else {
                int idx = ARRAY_SIZE(omap3_idle_data) - 1;

                /* Reach the current state starting at highest C-state */
                for (; idx >= 0; idx--) {
                        if (&drv->states[idx] == curr) {
                                next_index = idx;
                                break;
                        }
                }

                /* Should never hit this condition */
                WARN_ON(next_index == -1);

                /*
                 * Drop to next valid state.
                 * Start search from the next (lower) state.
                 */
                idx--;
                for (; idx >= 0; idx--) {
                        cx = cpuidle_get_statedata(&dev->states_usage[idx]);
                        if ((cx->mpu_state >= mpu_deepest_state) &&
                            (cx->core_state >= core_deepest_state)) {
                                next_index = idx;
                                break;
                        }
                }
                /*
                 * C1 is always valid.
                 * So, no need to check for 'next_index == -1' outside
                 * this loop.
                 */
        }

        return next_index;
}

/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: array index of target state to be programmed
 *
 * This function checks for any pending activity and then programs
 * the device to the specified or a safer state.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv,
                               int index)
{
        int new_state_idx;
        u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
        struct omap3_idle_statedata *cx;
        int ret;

        /*
         * Prevent idle completely if CAM is active.
         * CAM does not have wakeup capability in OMAP3.
         */
        cam_state = pwrdm_read_pwrst(cam_pd);
        if (cam_state == PWRDM_POWER_ON) {
                new_state_idx = drv->safe_state_index;
                goto select_state;
        }

        /*
         * FIXME: we currently manage device-specific idle states
         * for PER and CORE in combination with CPU-specific
         * idle states. This is wrong, and device-specific
         * idle management needs to be separated out into
         * its own code.
         */

        /*
         * Prevent PER off if CORE is not in retention or off as this
         * would disable PER wakeups completely.
         */
        cx = cpuidle_get_statedata(&dev->states_usage[index]);
        core_next_state = cx->core_state;
        per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
        if ((per_next_state == PWRDM_POWER_OFF) &&
            (core_next_state > PWRDM_POWER_RET))
                per_next_state = PWRDM_POWER_RET;

        /* Are we changing PER target state? */
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_next_state);

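        /* Fall back to a shallower C-state if the requested one is not allowed */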
        new_state_idx = next_valid_state(dev, drv, index);

select_state:
        ret = omap3_enter_idle(dev, drv, new_state_idx);

        /* Restore original PER state if it was modified */
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_saved_state);

        return ret;
}

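/* OMAP3 is single-core, so only the boot CPU's per-CPU device is used */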
DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);

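/*
 * exit_latency and target_residency are in microseconds; the exit_latency
 * sums appear to combine separate sleep and wakeup latency figures.
 */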
struct cpuidle_driver omap3_idle_driver = {
        .name = "omap3_idle",
        .owner = THIS_MODULE,
        .states = {
                {
                        .enter = omap3_enter_idle,
                        .exit_latency = 2 + 2,
                        .target_residency = 5,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .name = "C1",
                        .desc = "MPU ON + CORE ON",
                },
                {
                        .enter = omap3_enter_idle_bm,
                        .exit_latency = 10 + 10,
                        .target_residency = 30,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .name = "C2",
                        .desc = "MPU ON + CORE ON",
                },
                {
                        .enter = omap3_enter_idle_bm,
                        .exit_latency = 50 + 50,
                        .target_residency = 300,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .name = "C3",
                        .desc = "MPU RET + CORE ON",
                },
                {
                        .enter = omap3_enter_idle_bm,
                        .exit_latency = 1500 + 1800,
                        .target_residency = 4000,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .name = "C4",
                        .desc = "MPU OFF + CORE ON",
                },
                {
                        .enter = omap3_enter_idle_bm,
                        .exit_latency = 2500 + 7500,
                        .target_residency = 12000,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .name = "C5",
                        .desc = "MPU RET + CORE RET",
                },
                {
                        .enter = omap3_enter_idle_bm,
                        .exit_latency = 3000 + 8500,
                        .target_residency = 15000,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .name = "C6",
                        .desc = "MPU OFF + CORE RET",
                },
                {
                        .enter = omap3_enter_idle_bm,
                        .exit_latency = 10000 + 30000,
                        .target_residency = 30000,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .name = "C7",
                        .desc = "MPU OFF + CORE OFF",
                },
        },
        .state_count = ARRAY_SIZE(omap3_idle_data),
        .safe_state_index = 0,
};

/* Helper to attach the per C-state data to the cpuidle device's driver_data */
static inline struct omap3_idle_statedata *_fill_cstate_usage(
                                        struct cpuidle_device *dev,
                                        int idx)
{
        struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
        struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];

        cpuidle_set_statedata(state_usage, cx);

        return cx;
}

/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap3_idle_init(void)
{
        struct cpuidle_device *dev;
        struct omap3_idle_statedata *cx;

        mpu_pd = pwrdm_lookup("mpu_pwrdm");
        core_pd = pwrdm_lookup("core_pwrdm");
        per_pd = pwrdm_lookup("per_pwrdm");
        cam_pd = pwrdm_lookup("cam_pwrdm");

        dev = &per_cpu(omap3_idle_dev, smp_processor_id());

        /* C1 . MPU WFI + Core active */
        cx = _fill_cstate_usage(dev, 0);
        cx->mpu_state = PWRDM_POWER_ON;
        cx->core_state = PWRDM_POWER_ON;

        /* C2 . MPU WFI + Core inactive */
        cx = _fill_cstate_usage(dev, 1);
        cx->mpu_state = PWRDM_POWER_ON;
        cx->core_state = PWRDM_POWER_ON;

        /* C3 . MPU CSWR + Core inactive */
        cx = _fill_cstate_usage(dev, 2);
        cx->mpu_state = PWRDM_POWER_RET;
        cx->core_state = PWRDM_POWER_ON;

        /* C4 . MPU OFF + Core inactive */
        cx = _fill_cstate_usage(dev, 3);
        cx->mpu_state = PWRDM_POWER_OFF;
        cx->core_state = PWRDM_POWER_ON;

        /* C5 . MPU RET + Core RET */
        cx = _fill_cstate_usage(dev, 4);
        cx->mpu_state = PWRDM_POWER_RET;
        cx->core_state = PWRDM_POWER_RET;

        /* C6 . MPU OFF + Core RET */
        cx = _fill_cstate_usage(dev, 5);
        cx->mpu_state = PWRDM_POWER_OFF;
        cx->core_state = PWRDM_POWER_RET;

        /* C7 . MPU OFF + Core OFF */
        cx = _fill_cstate_usage(dev, 6);
        cx->mpu_state = PWRDM_POWER_OFF;
        cx->core_state = PWRDM_POWER_OFF;

        cpuidle_register_driver(&omap3_idle_driver);

        if (cpuidle_register_device(dev)) {
                printk(KERN_ERR "%s: CPUidle register device failed\n",
                       __func__);
                return -EIO;
        }

        return 0;
}
#else
int __init omap3_idle_init(void)
{
        return 0;
}
#endif /* CONFIG_CPU_IDLE */