arch/arm/mach-omap2/pm34xx.c
1 /*
2 * OMAP3 Power Management Routines
3 *
4 * Copyright (C) 2006-2008 Nokia Corporation
5 * Tony Lindgren <tony@atomide.com>
6 * Jouni Hogander
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 * Rajendra Nayak <rnayak@ti.com>
10 *
11 * Copyright (C) 2005 Texas Instruments, Inc.
12 * Richard Woodruff <r-woodruff2@ti.com>
13 *
14 * Based on pm.c for omap1
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
20
21 #include <linux/pm.h>
22 #include <linux/suspend.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/list.h>
26 #include <linux/err.h>
27 #include <linux/gpio.h>
28 #include <linux/clk.h>
29 #include <linux/delay.h>
30 #include <linux/slab.h>
31 #include <linux/omap-dma.h>
32 #include <linux/platform_data/gpio-omap.h>
33
34 #include <trace/events/power.h>
35
36 #include <asm/fncpy.h>
37 #include <asm/suspend.h>
38 #include <asm/system_misc.h>
39
40 #include "clockdomain.h"
41 #include "powerdomain.h"
42 #include "soc.h"
43 #include "common.h"
44 #include "cm3xxx.h"
45 #include "cm-regbits-34xx.h"
46 #include "gpmc.h"
47 #include "prm-regbits-34xx.h"
48 #include "prm3xxx.h"
49 #include "pm.h"
50 #include "sdrc.h"
51 #include "sram.h"
52 #include "control.h"
53 #include "vc.h"
54
55 /* pm34xx errata defined in pm.h */
56 u16 pm34xx_errata;
57
58 struct power_state {
59 struct powerdomain *pwrdm;
60 u32 next_state;
61 #ifdef CONFIG_SUSPEND
62 u32 saved_state;
63 #endif
64 struct list_head node;
65 };
66
67 static LIST_HEAD(pwrst_list);
68
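/* SRAM-resident copies of the suspend helpers, set up by omap_push_sram_idle() */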
69 static int (*_omap_save_secure_sram)(u32 *addr);
70 void (*omap3_do_wfi_sram)(void);
71
72 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
73 static struct powerdomain *core_pwrdm, *per_pwrdm;
74
75 static void omap3_core_save_context(void)
76 {
77 omap3_ctrl_save_padconf();
78
79 /*
80 * Force write last pad into memory, as this can fail in some
81 * cases according to errata 1.157, 1.185
82 */
83 omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
84 OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
85
86 /* Save the Interrupt controller context */
87 omap_intc_save_context();
88 /* Save the GPMC context */
89 omap3_gpmc_save_context();
90 /* Save the system control module context; padconf already saved above */
91 omap3_control_save_context();
92 omap_dma_global_context_save();
93 }
94
95 static void omap3_core_restore_context(void)
96 {
97 /* Restore the control module context, padconf restored by h/w */
98 omap3_control_restore_context();
99 /* Restore the GPMC context */
100 omap3_gpmc_restore_context();
101 /* Restore the interrupt controller context */
102 omap_intc_restore_context();
103 omap_dma_global_context_restore();
104 }
105
106 /*
107 * FIXME: This function should be called before entering off-mode after
108 * OMAP3 secure services have been accessed. Currently it is only called
109 * once during boot sequence, but this works as we are not using secure
110 * services.
111 */
112 static void omap3_save_secure_ram_context(void)
113 {
114 u32 ret;
115 int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
116
117 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
118 /*
119 * MPU next state must be set to POWER_ON temporarily,
120 * otherwise the WFI executed inside the ROM code
121 * will hang the system.
122 */
123 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
124 ret = _omap_save_secure_sram((u32 *)(unsigned long)
125 __pa(omap3_secure_ram_storage));
126 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
127 /* Following is for error tracking; it should not happen */
128 if (ret) {
129 pr_err("save_secure_sram() returns %08x\n", ret);
130 while (1)
131 ;
132 }
133 }
134 }
135
136 /*
137 * PRCM Interrupt Handler Helper Function
138 *
139 * The purpose of this function is to clear any wake-up events latched
140 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
141 * may occur whilst attempting to clear a PM_WKST_x register and thus
142 * set another bit in this register. A while loop is used to ensure
143 * that any peripheral wake-up events occurring while attempting to
144 * clear the PM_WKST_x are detected and cleared.
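 *
 * Returns the number of wake-up events that were cleared.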
145 */
146 static int prcm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
147 {
148 u32 wkst, fclk, iclk, clken;
149 u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
150 u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
151 u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
152 u16 grpsel_off = (regs == 3) ?
153 OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
154 int c = 0;
155
156 wkst = omap2_prm_read_mod_reg(module, wkst_off);
157 wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
158 wkst &= ~ignore_bits;
159 if (wkst) {
160 iclk = omap2_cm_read_mod_reg(module, iclk_off);
161 fclk = omap2_cm_read_mod_reg(module, fclk_off);
162 while (wkst) {
163 clken = wkst;
164 omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
165 /*
166 * For USBHOST, we don't know whether HOST1 or
167 * HOST2 woke us up, so enable both f-clocks
168 */
169 if (module == OMAP3430ES2_USBHOST_MOD)
170 clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
171 omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
172 omap2_prm_write_mod_reg(wkst, module, wkst_off);
173 wkst = omap2_prm_read_mod_reg(module, wkst_off);
174 wkst &= ~ignore_bits;
175 c++;
176 }
177 omap2_cm_write_mod_reg(iclk, module, iclk_off);
178 omap2_cm_write_mod_reg(fclk, module, fclk_off);
179 }
180
181 return c;
182 }
183
184 static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
185 {
186 int c;
187
188 c = prcm_clear_mod_irqs(WKUP_MOD, 1,
189 ~(OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK));
190
191 return c ? IRQ_HANDLED : IRQ_NONE;
192 }
193
194 static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
195 {
196 int c;
197
198 /*
199 * Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
200 * these are handled in a separate handler to avoid acking
201 * IO events before the mux code has parsed them
202 */
203 c = prcm_clear_mod_irqs(WKUP_MOD, 1,
204 OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK);
205 c += prcm_clear_mod_irqs(CORE_MOD, 1, 0);
206 c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1, 0);
207 if (omap_rev() > OMAP3430_REV_ES1_0) {
208 c += prcm_clear_mod_irqs(CORE_MOD, 3, 0);
209 c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, 0);
210 }
211
212 return c ? IRQ_HANDLED : IRQ_NONE;
213 }
214
215 static void omap34xx_save_context(u32 *save)
216 {
217 u32 val;
218
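/*
 * Each register below is stored as a (flag, value) pair; the leading 1
 * is assumed to mark the entry as valid for the low-level restore code
 * that consumes omap3_arm_context.
 */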
219 /* Read Auxiliary Control Register */
220 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
221 *save++ = 1;
222 *save++ = val;
223
224 /* Read L2 AUX ctrl register */
225 asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
226 *save++ = 1;
227 *save++ = val;
228 }
229
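/* Finisher callback for cpu_suspend(): runs the low-level omap34xx suspend code */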
230 static int omap34xx_do_sram_idle(unsigned long save_state)
231 {
232 omap34xx_cpu_suspend(save_state);
233 return 0;
234 }
235
236 void omap_sram_idle(void)
237 {
238 /* save_state tells what needs to be saved and restored in
239 * omap_sram_idle:
240 *   save_state = 0 => nothing to save or restore
241 *   save_state = 1 => only L1 and logic lost
242 *   save_state = 2 => only L2 lost
243 *   save_state = 3 => L1, L2 and logic lost */
244 int save_state = 0;
245 int mpu_next_state = PWRDM_POWER_ON;
246 int per_next_state = PWRDM_POWER_ON;
247 int core_next_state = PWRDM_POWER_ON;
248 int per_going_off;
249 int core_prev_state;
250 u32 sdrc_pwr = 0;
251
252 mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
253 switch (mpu_next_state) {
254 case PWRDM_POWER_ON:
255 case PWRDM_POWER_RET:
256 /* No need to save context */
257 save_state = 0;
258 break;
259 case PWRDM_POWER_OFF:
260 save_state = 3;
261 break;
262 default:
263 /* Invalid state */
264 pr_err("Invalid mpu state in sram_idle\n");
265 return;
266 }
267
268 /* NEON control */
269 if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
270 pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);
271
272 /* Enable IO-PAD and IO-CHAIN wakeups */
273 per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
274 core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
275
276 pwrdm_pre_transition(NULL);
277
278 /* PER */
279 if (per_next_state < PWRDM_POWER_ON) {
280 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
281 omap2_gpio_prepare_for_idle(per_going_off);
282 }
283
284 /* CORE */
285 if (core_next_state < PWRDM_POWER_ON) {
286 if (core_next_state == PWRDM_POWER_OFF) {
287 omap3_core_save_context();
288 omap3_cm_save_context();
289 }
290 }
291
292 /* Configure PMIC signaling for I2C4 or sys_off_mode */
293 omap3_vc_set_pmic_signaling(core_next_state);
294
295 omap3_intc_prepare_idle();
296
297 /*
298 * On EMU/HS devices the ROM code restores an SDRC value
299 * from scratchpad which has automatic self refresh on timeout
300 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
301 * Hence store/restore the SDRC_POWER register here.
302 */
303 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
304 (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
305 omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
306 core_next_state == PWRDM_POWER_OFF)
307 sdrc_pwr = sdrc_read_reg(SDRC_POWER);
308
309 /*
310 * omap3_arm_context is the location where some ARM context
311 * is saved. The rest is placed on the stack, and restored
312 * from there before resuming.
313 */
314 if (save_state)
315 omap34xx_save_context(omap3_arm_context);
316 if (save_state == 1 || save_state == 3)
317 cpu_suspend(save_state, omap34xx_do_sram_idle);
318 else
319 omap34xx_do_sram_idle(save_state);
320
321 /* Restore normal SDRC POWER settings */
322 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
323 (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
324 omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
325 core_next_state == PWRDM_POWER_OFF)
326 sdrc_write_reg(sdrc_pwr, SDRC_POWER);
327
328 /* CORE */
329 if (core_next_state < PWRDM_POWER_ON) {
330 core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
331 if (core_prev_state == PWRDM_POWER_OFF) {
332 omap3_core_restore_context();
333 omap3_cm_restore_context();
334 omap3_sram_restore_context();
335 omap2_sms_restore_context();
336 }
337 }
338 omap3_intc_resume_idle();
339
340 pwrdm_post_transition(NULL);
341
342 /* PER */
343 if (per_next_state < PWRDM_POWER_ON)
344 omap2_gpio_resume_after_idle();
345 }
346
347 static void omap3_pm_idle(void)
348 {
349 if (omap_irq_pending())
350 return;
351
352 trace_cpu_idle(1, smp_processor_id());
353
354 omap_sram_idle();
355
356 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
357 }
358
359 #ifdef CONFIG_SUSPEND
360 static int omap3_pm_suspend(void)
361 {
362 struct power_state *pwrst;
363 int state, ret = 0;
364
365 /* Read current next_pwrsts */
366 list_for_each_entry(pwrst, &pwrst_list, node)
367 pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
368 /* Set ones wanted by suspend */
369 list_for_each_entry(pwrst, &pwrst_list, node) {
370 if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
371 goto restore;
372 if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
373 goto restore;
374 }
375
376 omap3_intc_suspend();
377
378 omap_sram_idle();
379
380 restore:
381 /* Restore next_pwrsts */
382 list_for_each_entry(pwrst, &pwrst_list, node) {
383 state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
384 if (state > pwrst->next_state) {
385 pr_info("Powerdomain (%s) didn't enter target state %d\n",
386 pwrst->pwrdm->name, pwrst->next_state);
387 ret = -1;
388 }
389 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
390 }
391 if (ret)
392 pr_err("Could not enter target state in pm_suspend\n");
393 else
394 pr_info("Successfully put all powerdomains to target state\n");
395
396 return ret;
397 }
398 #else
399 #define omap3_pm_suspend NULL
400 #endif /* CONFIG_SUSPEND */
401
402
403 /**
404 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
405 * retention
406 *
407 * In cases where IVA2 is activated by bootcode, it may prevent
408 * full-chip retention or off-mode because it is not idle. This
409 * function forces the IVA2 into idle state so it can go
410 * into retention/off and thus allow full-chip retention/off.
411 *
412 **/
413 static void __init omap3_iva_idle(void)
414 {
415 /* ensure IVA2 clock is disabled */
416 omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
417
418 /* if no clock activity, nothing else to do */
419 if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
420 OMAP3430_CLKACTIVITY_IVA2_MASK))
421 return;
422
423 /* Reset IVA2 */
424 omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
425 OMAP3430_RST2_IVA2_MASK |
426 OMAP3430_RST3_IVA2_MASK,
427 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
428
429 /* Enable IVA2 clock */
430 omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
431 OMAP3430_IVA2_MOD, CM_FCLKEN);
432
433 /* Set IVA2 boot mode to 'idle' */
434 omap3_ctrl_set_iva_bootmode_idle();
435
436 /* Un-reset IVA2 */
437 omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
438
439 /* Disable IVA2 clock */
440 omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
441
442 /* Reset IVA2 */
443 omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
444 OMAP3430_RST2_IVA2_MASK |
445 OMAP3430_RST3_IVA2_MASK,
446 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
447 }
448
449 static void __init omap3_d2d_idle(void)
450 {
451 u16 mask, padconf;
452
453 /* In a stand-alone OMAP3430, where there is no stacked modem,
454 * the D2D Idle Ack and D2D MStandby signals must be pulled high.
455 * Set CONTROL_PADCONF_SAD2D_IDLEACK and
456 * CONTROL_PADCONF_SAD2D_MSTDBY to have a pull up. */
457 mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
458 padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
459 padconf |= mask;
460 omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);
461
462 padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
463 padconf |= mask;
464 omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
465
466 /* reset modem */
467 omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
468 OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
469 CORE_MOD, OMAP2_RM_RSTCTRL);
470 omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
471 }
472
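/*
 * One-time PRM/CM setup at init: configure wakeup sources and MPU group
 * selection, clear stale reset status and pending PRCM interrupts, and
 * idle the IVA2 and D2D interfaces.
 */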
473 static void __init prcm_setup_regs(void)
474 {
475 u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
476 OMAP3630_EN_UART4_MASK : 0;
477 u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
478 OMAP3630_GRPSEL_UART4_MASK : 0;
479
480 /* XXX This should be handled by hwmod code or SCM init code */
481 omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);
482
483 /*
484 * Enable control of the external oscillator through
485 * sys_clkreq. In the long run the clock framework should
486 * take care of this.
487 */
488 omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
489 1 << OMAP_AUTOEXTCLKMODE_SHIFT,
490 OMAP3430_GR_MOD,
491 OMAP3_PRM_CLKSRC_CTRL_OFFSET);
492
493 /* Set up wakeup sources */
494 omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
495 OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
496 WKUP_MOD, PM_WKEN);
497 /* No need to write EN_IO, that is always enabled */
498 omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
499 OMAP3430_GRPSEL_GPT1_MASK |
500 OMAP3430_GRPSEL_GPT12_MASK,
501 WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
502
503 /* Enable PM_WKEN to support DSS LPR */
504 omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
505 OMAP3430_DSS_MOD, PM_WKEN);
506
507 /* Enable wakeups in PER */
508 omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
509 OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
510 OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
511 OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
512 OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
513 OMAP3430_EN_MCBSP4_MASK,
514 OMAP3430_PER_MOD, PM_WKEN);
515 /* and allow them to wake up MPU */
516 omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
517 OMAP3430_GRPSEL_GPIO2_MASK |
518 OMAP3430_GRPSEL_GPIO3_MASK |
519 OMAP3430_GRPSEL_GPIO4_MASK |
520 OMAP3430_GRPSEL_GPIO5_MASK |
521 OMAP3430_GRPSEL_GPIO6_MASK |
522 OMAP3430_GRPSEL_UART3_MASK |
523 OMAP3430_GRPSEL_MCBSP2_MASK |
524 OMAP3430_GRPSEL_MCBSP3_MASK |
525 OMAP3430_GRPSEL_MCBSP4_MASK,
526 OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
527
528 /* Don't attach IVA interrupts */
529 if (omap3_has_iva()) {
530 omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
531 omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
532 omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
533 omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD,
534 OMAP3430_PM_IVAGRPSEL);
535 }
536
537 /* Clear any pending 'reset' flags */
538 omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
539 omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
540 omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
541 omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
542 omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
543 omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
544 omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);
545
546 /* Clear any pending PRCM interrupts */
547 omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
548
549 /*
550 * We need to idle iva2_pwrdm even on am3703 with no iva2.
551 */
552 omap3_iva_idle();
553
554 omap3_d2d_idle();
555 }
556
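/*
 * Select OFF (enable != 0) or RETENTION as the low-power target state for
 * all registered powerdomains; CORE is kept in RETENTION on parts affected
 * by erratum i583.
 */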
557 void omap3_pm_off_mode_enable(int enable)
558 {
559 struct power_state *pwrst;
560 u32 state;
561
562 if (enable)
563 state = PWRDM_POWER_OFF;
564 else
565 state = PWRDM_POWER_RET;
566
567 list_for_each_entry(pwrst, &pwrst_list, node) {
568 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
569 pwrst->pwrdm == core_pwrdm &&
570 state == PWRDM_POWER_OFF) {
571 pwrst->next_state = PWRDM_POWER_RET;
572 pr_warn("%s: Core OFF disabled due to errata i583\n",
573 __func__);
574 } else {
575 pwrst->next_state = state;
576 }
577 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
578 }
579 }
580
581 int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
582 {
583 struct power_state *pwrst;
584
585 list_for_each_entry(pwrst, &pwrst_list, node) {
586 if (pwrst->pwrdm == pwrdm)
587 return pwrst->next_state;
588 }
589 return -EINVAL;
590 }
591
592 int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
593 {
594 struct power_state *pwrst;
595
596 list_for_each_entry(pwrst, &pwrst_list, node) {
597 if (pwrst->pwrdm == pwrdm) {
598 pwrst->next_state = state;
599 return 0;
600 }
601 }
602 return -EINVAL;
603 }
604
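/*
 * Called once per registered powerdomain from omap3_pm_init(): record the
 * domain, program RETENTION as its default target state, and enable
 * hardware save-and-restore where the domain supports it.
 */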
605 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
606 {
607 struct power_state *pwrst;
608
609 if (!pwrdm->pwrsts)
610 return 0;
611
612 pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
613 if (!pwrst)
614 return -ENOMEM;
615 pwrst->pwrdm = pwrdm;
616 pwrst->next_state = PWRDM_POWER_RET;
617 list_add(&pwrst->node, &pwrst_list);
618
619 if (pwrdm_has_hdwr_sar(pwrdm))
620 pwrdm_enable_hdwr_sar(pwrdm);
621
622 return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
623 }
624
625 /*
626 * Push functions to SRAM
627 *
628 * The minimum set of functions is pushed to SRAM for execution:
629  * - omap3_do_wfi for the erratum i581 workaround,
630 * - save_secure_ram_context for security extensions.
631 */
632 void omap_push_sram_idle(void)
633 {
634 omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
635
636 if (omap_type() != OMAP2_DEVICE_TYPE_GP)
637 _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
638 save_secure_ram_context_sz);
639 }
640
641 static void __init pm_errata_configure(void)
642 {
643 if (cpu_is_omap3630()) {
644 pm34xx_errata |= PM_RTA_ERRATUM_i608;
645 /* Enable the l2 cache toggling in sleep logic */
646 enable_omap3630_toggle_l2_on_restore();
647 if (omap_rev() < OMAP3630_REV_ES1_2)
648 pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
649 PM_PER_MEMORIES_ERRATUM_i582);
650 } else if (cpu_is_omap34xx()) {
651 pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
652 }
653 }
654
655 int __init omap3_pm_init(void)
656 {
657 struct power_state *pwrst, *tmp;
658 struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
659 int ret;
660
661 if (!omap3_has_io_chain_ctrl())
662 pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");
663
664 pm_errata_configure();
665
666 /* XXX prcm_setup_regs needs to be before enabling hw
667 * supervised mode for powerdomains */
668 prcm_setup_regs();
669
670 ret = request_irq(omap_prcm_event_to_irq("wkup"),
671 _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);
672
673 if (ret) {
674 pr_err("pm: Failed to request pm_wkup irq\n");
675 goto err1;
676 }
677
678 /* IO interrupt is shared with mux code */
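/*
 * omap3_pm_init is used here only as a unique dev_id cookie: a shared
 * IRQ needs a non-NULL token so free_irq() in the error path can match it.
 */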
679 ret = request_irq(omap_prcm_event_to_irq("io"),
680 _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
681 omap3_pm_init);
682 enable_irq(omap_prcm_event_to_irq("io"));
683
684 if (ret) {
685 pr_err("pm: Failed to request pm_io irq\n");
686 goto err2;
687 }
688
689 ret = pwrdm_for_each(pwrdms_setup, NULL);
690 if (ret) {
691 pr_err("Failed to setup powerdomains\n");
692 goto err3;
693 }
694
695 (void) clkdm_for_each(omap_pm_clkdms_setup, NULL);
696
697 mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
698 if (mpu_pwrdm == NULL) {
699 pr_err("Failed to get mpu_pwrdm\n");
700 ret = -EINVAL;
701 goto err3;
702 }
703
704 neon_pwrdm = pwrdm_lookup("neon_pwrdm");
705 per_pwrdm = pwrdm_lookup("per_pwrdm");
706 core_pwrdm = pwrdm_lookup("core_pwrdm");
707
708 neon_clkdm = clkdm_lookup("neon_clkdm");
709 mpu_clkdm = clkdm_lookup("mpu_clkdm");
710 per_clkdm = clkdm_lookup("per_clkdm");
711 wkup_clkdm = clkdm_lookup("wkup_clkdm");
712
713 omap_common_suspend_init(omap3_pm_suspend);
714
715 arm_pm_idle = omap3_pm_idle;
716 omap3_idle_init();
717
718 /*
719 * RTA is disabled during initialization, as per erratum i608.
720 * It is safer to disable RTA in the bootloader, but we would like
721 * to be doubly sure here and prevent any mishaps.
722 */
723 if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
724 omap3630_ctrl_disable_rta();
725
726 /*
727 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
728 * not correctly reset when the PER powerdomain comes back
729 * from OFF or OSWR when the CORE powerdomain is kept active.
730 * See OMAP36xx Erratum i582 "PER Domain reset issue after
731 * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
732 * complete workaround. The kernel must also prevent the PER
733 * powerdomain from going to OSWR/OFF while the CORE
734 * powerdomain is not going to OSWR/OFF. And if PER last
735 * power state was off while CORE last power state was ON, the
736 * UART3/4 and McBSP2/3 SIDETONE devices need to run a
737 * self-test using their loopback tests; if that fails, those
738 * devices are unusable until the PER/CORE can complete a transition
739 * from ON to OSWR/OFF and then back to ON.
740 *
741 * XXX Technically this workaround is only needed if off-mode
742 * or OSWR is enabled.
743 */
744 if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
745 clkdm_add_wkdep(per_clkdm, wkup_clkdm);
746
747 clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
748 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
749 omap3_secure_ram_storage =
750 kmalloc(0x803F, GFP_KERNEL);
751 if (!omap3_secure_ram_storage)
752 pr_err("Memory allocation failed when allocating for secure sram context\n");
753
754 local_irq_disable();
755
756 omap_dma_global_context_save();
757 omap3_save_secure_ram_context();
758 omap_dma_global_context_restore();
759
760 local_irq_enable();
761 }
762
763 omap3_save_scratchpad_contents();
764 return ret;
765
766 err3:
767 list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
768 list_del(&pwrst->node);
769 kfree(pwrst);
770 }
771 free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
772 err2:
773 free_irq(omap_prcm_event_to_irq("wkup"), NULL);
774 err1:
775 return ret;
776 }