arch/arm/mach-omap2/pm34xx.c
1 /*
2 * OMAP3 Power Management Routines
3 *
4 * Copyright (C) 2006-2008 Nokia Corporation
5 * Tony Lindgren <tony@atomide.com>
6 * Jouni Hogander
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 * Rajendra Nayak <rnayak@ti.com>
10 *
11 * Copyright (C) 2005 Texas Instruments, Inc.
12 * Richard Woodruff <r-woodruff2@ti.com>
13 *
14 * Based on pm.c for omap1
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
20
21 #include <linux/pm.h>
22 #include <linux/suspend.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/list.h>
26 #include <linux/err.h>
27 #include <linux/gpio.h>
28 #include <linux/clk.h>
29 #include <linux/delay.h>
30 #include <linux/slab.h>
31 #include <linux/console.h>
32
33 #include <plat/sram.h>
34 #include "clockdomain.h"
35 #include "powerdomain.h"
36 #include <plat/serial.h>
37 #include <plat/sdrc.h>
38 #include <plat/prcm.h>
39 #include <plat/gpmc.h>
40 #include <plat/dma.h>
41
42 #include <asm/tlbflush.h>
43
44 #include "cm2xxx_3xxx.h"
45 #include "cm-regbits-34xx.h"
46 #include "prm-regbits-34xx.h"
47
48 #include "prm2xxx_3xxx.h"
49 #include "pm.h"
50 #include "sdrc.h"
51 #include "control.h"
52
53 #ifdef CONFIG_SUSPEND
54 static suspend_state_t suspend_state = PM_SUSPEND_ON;
55 static inline bool is_suspending(void)
56 {
57 return (suspend_state != PM_SUSPEND_ON);
58 }
59 #else
60 static inline bool is_suspending(void)
61 {
62 return false;
63 }
64 #endif
65
66 /* Scratchpad offsets */
67 #define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4
68 #define OMAP343X_TABLE_VALUE_OFFSET 0xc0
69 #define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8
70
71 /* pm34xx errata defined in pm.h */
72 u16 pm34xx_errata;
73
74 struct power_state {
75 struct powerdomain *pwrdm;
76 u32 next_state;
77 #ifdef CONFIG_SUSPEND
78 u32 saved_state;
79 #endif
80 struct list_head node;
81 };
82
83 static LIST_HEAD(pwrst_list);
84
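/* Entry points for the code pushed into on-chip SRAM by omap_push_sram_idle() */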
85 static void (*_omap_sram_idle)(u32 *addr, int save_state);
86
87 static int (*_omap_save_secure_sram)(u32 *addr);
88
89 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
90 static struct powerdomain *core_pwrdm, *per_pwrdm;
91 static struct powerdomain *cam_pwrdm;
92
93 static inline void omap3_per_save_context(void)
94 {
95 omap_gpio_save_context();
96 }
97
98 static inline void omap3_per_restore_context(void)
99 {
100 omap_gpio_restore_context();
101 }
102
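/* The I/O wakeup daisy chain is only present on 3430 ES3.1 and later, hence the revision checks below */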
103 static void omap3_enable_io_chain(void)
104 {
105 int timeout = 0;
106
107 if (omap_rev() >= OMAP3430_REV_ES3_1) {
108 omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
109 PM_WKEN);
110 /* Do a readback to ensure the write has been done */
111 omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
112
113 while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
114 OMAP3430_ST_IO_CHAIN_MASK)) {
115 timeout++;
116 if (timeout > 1000) {
117 printk(KERN_ERR "Wake up daisy chain "
118 "activation failed.\n");
119 return;
120 }
121 omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
122 WKUP_MOD, PM_WKEN);
123 }
124 }
125 }
126
127 static void omap3_disable_io_chain(void)
128 {
129 if (omap_rev() >= OMAP3430_REV_ES3_1)
130 omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
131 PM_WKEN);
132 }
133
134 static void omap3_core_save_context(void)
135 {
136 omap3_ctrl_save_padconf();
137
138 /*
139 * Force write last pad into memory, as this can fail in some
140 * cases according to errata 1.157, 1.185
141 */
142 omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
143 OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
144
145 /* Save the Interrupt controller context */
146 omap_intc_save_context();
147 /* Save the GPMC context */
148 omap3_gpmc_save_context();
149 /* Save the system control module context, padconf already saved above */
150 omap3_control_save_context();
151 omap_dma_global_context_save();
152 }
153
154 static void omap3_core_restore_context(void)
155 {
156 /* Restore the control module context, padconf restored by h/w */
157 omap3_control_restore_context();
158 /* Restore the GPMC context */
159 omap3_gpmc_restore_context();
160 /* Restore the interrupt controller context */
161 omap_intc_restore_context();
162 omap_dma_global_context_restore();
163 }
164
165 /*
166 * FIXME: This function should be called before entering off-mode after
167 * OMAP3 secure services have been accessed. Currently it is only called
168 * once during boot sequence, but this works as we are not using secure
169 * services.
170 */
171 static void omap3_save_secure_ram_context(u32 target_mpu_state)
172 {
173 u32 ret;
174
175 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
176 /*
177 * MPU next state must be set to POWER_ON temporarily,
178 * otherwise the WFI executed inside the ROM code
179 * will hang the system.
180 */
181 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
182 ret = _omap_save_secure_sram((u32 *)
183 __pa(omap3_secure_ram_storage));
184 pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
185 /* The following is for error tracking; it should not happen */
186 if (ret) {
187 printk(KERN_ERR "save_secure_sram() returns %08x\n",
188 ret);
189 while (1)
190 ;
191 }
192 }
193 }
194
195 /*
196 * PRCM Interrupt Handler Helper Function
197 *
198 * The purpose of this function is to clear any wake-up events latched
199 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
200 * may occur whilst attempting to clear a PM_WKST_x register and thus
201 * set another bit in this register. A while loop is used to ensure
202 * that any peripheral wake-up events occurring while attempting to
203 * clear the PM_WKST_x are detected and cleared.
204 */
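/* @regs selects the register bank to scan: 1 for PM_WKST1/CM_FCLKEN1, 3 for the ES2+ PM_WKST3/CM_FCLKEN3 set */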
205 static int prcm_clear_mod_irqs(s16 module, u8 regs)
206 {
207 u32 wkst, fclk, iclk, clken;
208 u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
209 u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
210 u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
211 u16 grpsel_off = (regs == 3) ?
212 OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
213 int c = 0;
214
215 wkst = omap2_prm_read_mod_reg(module, wkst_off);
216 wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
217 if (wkst) {
218 iclk = omap2_cm_read_mod_reg(module, iclk_off);
219 fclk = omap2_cm_read_mod_reg(module, fclk_off);
220 while (wkst) {
221 clken = wkst;
222 omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
223 /*
224 * For USBHOST, we don't know whether HOST1 or
225 * HOST2 woke us up, so enable both f-clocks
226 */
227 if (module == OMAP3430ES2_USBHOST_MOD)
228 clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
229 omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
230 omap2_prm_write_mod_reg(wkst, module, wkst_off);
231 wkst = omap2_prm_read_mod_reg(module, wkst_off);
232 c++;
233 }
234 omap2_cm_write_mod_reg(iclk, module, iclk_off);
235 omap2_cm_write_mod_reg(fclk, module, fclk_off);
236 }
237
238 return c;
239 }
240
241 static int _prcm_int_handle_wakeup(void)
242 {
243 int c;
244
245 c = prcm_clear_mod_irqs(WKUP_MOD, 1);
246 c += prcm_clear_mod_irqs(CORE_MOD, 1);
247 c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
248 if (omap_rev() > OMAP3430_REV_ES1_0) {
249 c += prcm_clear_mod_irqs(CORE_MOD, 3);
250 c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
251 }
252
253 return c;
254 }
255
256 /*
257 * PRCM Interrupt Handler
258 *
259 * The PRM_IRQSTATUS_MPU register indicates if there are any pending
260 * interrupts from the PRCM for the MPU. These bits must be cleared in
261 * order to clear the PRCM interrupt. The PRCM interrupt handler is
262 * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
263 * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
264 * register indicates that a wake-up event is pending for the MPU and
265 * this bit can only be cleared if all the wake-up events latched
266 * in the various PM_WKST_x registers have been cleared. The interrupt
267 * handler is implemented using a do-while loop so that if a wake-up
268 * event occurred during the processing of the prcm interrupt handler
269 * (setting a bit in the corresponding PM_WKST_x register and thus
270 * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
271 * this would be handled.
272 */
273 static irqreturn_t prcm_interrupt_handler(int irq, void *dev_id)
274 {
275 u32 irqenable_mpu, irqstatus_mpu;
276 int c = 0;
277
278 irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
279 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
280 irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
281 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
282 irqstatus_mpu &= irqenable_mpu;
283
284 do {
285 if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
286 OMAP3430_IO_ST_MASK)) {
287 c = _prcm_int_handle_wakeup();
288
289 /*
290 * Is the MPU PRCM interrupt handler racing with the
291 * IVA2 PRCM interrupt handler ?
292 */
293 WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
294 "but no wakeup sources are marked\n");
295 } else {
296 /* XXX we need to expand our PRCM interrupt handler */
297 WARN(1, "prcm: WARNING: PRCM interrupt received, but "
298 "no code to handle it (%08x)\n", irqstatus_mpu);
299 }
300
301 omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
302 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
303
304 irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
305 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
306 irqstatus_mpu &= irqenable_mpu;
307
308 } while (irqstatus_mpu);
309
310 return IRQ_HANDLED;
311 }
312
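/* Write @val back to the CP15 c1 System Control Register (SCTLR) */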
313 static void restore_control_register(u32 val)
314 {
315 __asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
316 }
317
318 /* Function to restore the table entry that was modified for enabling MMU */
319 static void restore_table_entry(void)
320 {
321 void __iomem *scratchpad_address;
322 u32 previous_value, control_reg_value;
323 u32 *address;
324
325 scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
326
327 /* Get address of entry that was modified */
328 address = (u32 *)__raw_readl(scratchpad_address +
329 OMAP343X_TABLE_ADDRESS_OFFSET);
330 /* Get the previous value which needs to be restored */
331 previous_value = __raw_readl(scratchpad_address +
332 OMAP343X_TABLE_VALUE_OFFSET);
333 address = __va(address);
334 *address = previous_value;
335 flush_tlb_all();
336 control_reg_value = __raw_readl(scratchpad_address
337 + OMAP343X_CONTROL_REG_VALUE_OFFSET);
338 /* This will enable caches and prediction */
339 restore_control_register(control_reg_value);
340 }
341
342 void omap_sram_idle(void)
343 {
344 /* Variable to tell what needs to be saved and restored
345 * in omap_sram_idle */
346 /* save_state = 0 => Nothing to save and restore */
347 /* save_state = 1 => Only L1 and logic lost */
348 /* save_state = 2 => Only L2 lost */
349 /* save_state = 3 => L1, L2 and logic lost */
350 int save_state = 0;
351 int mpu_next_state = PWRDM_POWER_ON;
352 int per_next_state = PWRDM_POWER_ON;
353 int core_next_state = PWRDM_POWER_ON;
354 int per_going_off;
355 int core_prev_state, per_prev_state;
356 u32 sdrc_pwr = 0;
357
358 if (!_omap_sram_idle)
359 return;
360
361 pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
362 pwrdm_clear_all_prev_pwrst(neon_pwrdm);
363 pwrdm_clear_all_prev_pwrst(core_pwrdm);
364 pwrdm_clear_all_prev_pwrst(per_pwrdm);
365
366 mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
367 switch (mpu_next_state) {
368 case PWRDM_POWER_ON:
369 case PWRDM_POWER_RET:
370 /* No need to save context */
371 save_state = 0;
372 break;
373 case PWRDM_POWER_OFF:
374 save_state = 3;
375 break;
376 default:
377 /* Invalid state */
378 printk(KERN_ERR "Invalid mpu state in sram_idle\n");
379 return;
380 }
381 pwrdm_pre_transition();
382
383 /* NEON control */
384 if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
385 pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);
386
387 /* Enable IO-PAD and IO-CHAIN wakeups */
388 per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
389 core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
390 if (omap3_has_io_wakeup() &&
391 (per_next_state < PWRDM_POWER_ON ||
392 core_next_state < PWRDM_POWER_ON)) {
393 omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
394 omap3_enable_io_chain();
395 }
396
397 /* Block console output in case it is on one of the OMAP UARTs */
398 if (!is_suspending())
399 if (per_next_state < PWRDM_POWER_ON ||
400 core_next_state < PWRDM_POWER_ON)
401 if (!console_trylock())
402 goto console_still_active;
403
404 /* PER */
405 if (per_next_state < PWRDM_POWER_ON) {
406 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
407 omap_uart_prepare_idle(2);
408 omap_uart_prepare_idle(3);
409 omap2_gpio_prepare_for_idle(per_going_off);
410 if (per_next_state == PWRDM_POWER_OFF)
411 omap3_per_save_context();
412 }
413
414 /* CORE */
415 if (core_next_state < PWRDM_POWER_ON) {
416 omap_uart_prepare_idle(0);
417 omap_uart_prepare_idle(1);
418 if (core_next_state == PWRDM_POWER_OFF) {
419 omap3_core_save_context();
420 omap3_cm_save_context();
421 }
422 }
423
424 omap3_intc_prepare_idle();
425
426 /*
427 * On EMU/HS devices the ROM code restores an SDRC value
428 * from the scratchpad which has automatic self-refresh on timeout
429 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
430 * Hence store/restore the SDRC_POWER register here.
431 */
432 if (omap_rev() >= OMAP3430_REV_ES3_0 &&
433 omap_type() != OMAP2_DEVICE_TYPE_GP &&
434 core_next_state == PWRDM_POWER_OFF)
435 sdrc_pwr = sdrc_read_reg(SDRC_POWER);
436
437 /*
438 * omap3_arm_context is the location where ARM registers
439 * get saved. The restore path then reads from this
440 * location and restores them back.
441 */
442 _omap_sram_idle(omap3_arm_context, save_state);
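/* cpu_init() re-initialises the exception mode (IRQ/ABT/UND/FIQ) stacks, which are lost if the MPU actually entered off mode */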
443 cpu_init();
444
445 /* Restore normal SDRC POWER settings */
446 if (omap_rev() >= OMAP3430_REV_ES3_0 &&
447 omap_type() != OMAP2_DEVICE_TYPE_GP &&
448 core_next_state == PWRDM_POWER_OFF)
449 sdrc_write_reg(sdrc_pwr, SDRC_POWER);
450
451 /* Restore table entry modified during MMU restoration */
452 if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
453 restore_table_entry();
454
455 /* CORE */
456 if (core_next_state < PWRDM_POWER_ON) {
457 core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
458 if (core_prev_state == PWRDM_POWER_OFF) {
459 omap3_core_restore_context();
460 omap3_cm_restore_context();
461 omap3_sram_restore_context();
462 omap2_sms_restore_context();
463 }
464 omap_uart_resume_idle(0);
465 omap_uart_resume_idle(1);
466 if (core_next_state == PWRDM_POWER_OFF)
467 omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
468 OMAP3430_GR_MOD,
469 OMAP3_PRM_VOLTCTRL_OFFSET);
470 }
471 omap3_intc_resume_idle();
472
473 /* PER */
474 if (per_next_state < PWRDM_POWER_ON) {
475 per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
476 omap2_gpio_resume_after_idle();
477 if (per_prev_state == PWRDM_POWER_OFF)
478 omap3_per_restore_context();
479 omap_uart_resume_idle(2);
480 omap_uart_resume_idle(3);
481 }
482
483 if (!is_suspending())
484 console_unlock();
485
486 console_still_active:
487 /* Disable IO-PAD and IO-CHAIN wakeup */
488 if (omap3_has_io_wakeup() &&
489 (per_next_state < PWRDM_POWER_ON ||
490 core_next_state < PWRDM_POWER_ON)) {
491 omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
492 PM_WKEN);
493 omap3_disable_io_chain();
494 }
495
496 pwrdm_post_transition();
497
498 omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
499 }
500
501 int omap3_can_sleep(void)
502 {
503 if (!sleep_while_idle)
504 return 0;
505 if (!omap_uart_can_sleep())
506 return 0;
507 return 1;
508 }
509
510 static void omap3_pm_idle(void)
511 {
512 local_irq_disable();
513 local_fiq_disable();
514
515 if (!omap3_can_sleep())
516 goto out;
517
518 if (omap_irq_pending() || need_resched())
519 goto out;
520
521 omap_sram_idle();
522
523 out:
524 local_fiq_enable();
525 local_irq_enable();
526 }
527
528 #ifdef CONFIG_SUSPEND
529 static int omap3_pm_suspend(void)
530 {
531 struct power_state *pwrst;
532 int state, ret = 0;
533
534 if (wakeup_timer_seconds || wakeup_timer_milliseconds)
535 omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
536 wakeup_timer_milliseconds);
537
538 /* Read current next_pwrsts */
539 list_for_each_entry(pwrst, &pwrst_list, node)
540 pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
541 /* Set ones wanted by suspend */
542 list_for_each_entry(pwrst, &pwrst_list, node) {
543 if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
544 goto restore;
545 if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
546 goto restore;
547 }
548
549 omap_uart_prepare_suspend();
550 omap3_intc_suspend();
551
552 omap_sram_idle();
553
554 restore:
555 /* Restore next_pwrsts */
556 list_for_each_entry(pwrst, &pwrst_list, node) {
557 state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
558 if (state > pwrst->next_state) {
559 printk(KERN_INFO "Powerdomain (%s) didn't enter "
560 "target state %d\n",
561 pwrst->pwrdm->name, pwrst->next_state);
562 ret = -1;
563 }
564 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
565 }
566 if (ret)
567 printk(KERN_ERR "Could not enter target state in pm_suspend\n");
568 else
569 printk(KERN_INFO "Successfully put all powerdomains "
570 "to target state\n");
571
572 return ret;
573 }
574
575 static int omap3_pm_enter(suspend_state_t unused)
576 {
577 int ret = 0;
578
579 switch (suspend_state) {
580 case PM_SUSPEND_STANDBY:
581 case PM_SUSPEND_MEM:
582 ret = omap3_pm_suspend();
583 break;
584 default:
585 ret = -EINVAL;
586 }
587
588 return ret;
589 }
590
591 /* Hooks to enable / disable UART interrupts during suspend */
592 static int omap3_pm_begin(suspend_state_t state)
593 {
594 disable_hlt();
595 suspend_state = state;
596 omap_uart_enable_irqs(0);
597 return 0;
598 }
599
600 static void omap3_pm_end(void)
601 {
602 suspend_state = PM_SUSPEND_ON;
603 omap_uart_enable_irqs(1);
604 enable_hlt();
605 return;
606 }
607
608 static const struct platform_suspend_ops omap_pm_ops = {
609 .begin = omap3_pm_begin,
610 .end = omap3_pm_end,
611 .enter = omap3_pm_enter,
612 .valid = suspend_valid_only_mem,
613 };
614 #endif /* CONFIG_SUSPEND */
615
616
617 /**
618 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
619 * retention
620 *
621 * In cases where IVA2 is activated by bootcode, it may prevent
622 * full-chip retention or off-mode because it is not idle. This
623 * function forces the IVA2 into idle state so it can go
624 * into retention/off and thus allow full-chip retention/off.
625 *
626 **/
627 static void __init omap3_iva_idle(void)
628 {
629 /* ensure IVA2 clock is disabled */
630 omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
631
632 /* if no clock activity, nothing else to do */
633 if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
634 OMAP3430_CLKACTIVITY_IVA2_MASK))
635 return;
636
637 /* Reset IVA2 */
638 omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
639 OMAP3430_RST2_IVA2_MASK |
640 OMAP3430_RST3_IVA2_MASK,
641 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
642
643 /* Enable IVA2 clock */
644 omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
645 OMAP3430_IVA2_MOD, CM_FCLKEN);
646
647 /* Set IVA2 boot mode to 'idle' */
648 omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
649 OMAP343X_CONTROL_IVA2_BOOTMOD);
650
651 /* Un-reset IVA2 */
652 omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
653
654 /* Disable IVA2 clock */
655 omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
656
657 /* Reset IVA2 */
658 omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
659 OMAP3430_RST2_IVA2_MASK |
660 OMAP3430_RST3_IVA2_MASK,
661 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
662 }
663
664 static void __init omap3_d2d_idle(void)
665 {
666 u16 mask, padconf;
667
668 /* In a stand-alone OMAP3430, where there is no stacked
669 * modem, the D2D Idle Ack and D2D MStandby signals must be pulled
670 * high. Set CONTROL_PADCONF_SAD2D_IDLEACK and
671 * CONTROL_PADCONF_SAD2D_MSTDBY to have a pull-up. */
672 mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
673 padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
674 padconf |= mask;
675 omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);
676
677 padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
678 padconf |= mask;
679 omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
680
681 /* reset modem */
682 omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
683 OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
684 CORE_MOD, OMAP2_RM_RSTCTRL);
685 omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
686 }
687
688 static void __init prcm_setup_regs(void)
689 {
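/* UART4 exists only on OMAP3630, so build its masks conditionally */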
690 u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
691 OMAP3630_AUTO_UART4_MASK : 0;
692 u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
693 OMAP3630_EN_UART4_MASK : 0;
694 u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
695 OMAP3630_GRPSEL_UART4_MASK : 0;
696
697
698 /* XXX Reset all wkdeps. This should be done when initializing
699 * powerdomains */
700 omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
701 omap2_prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
702 omap2_prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
703 omap2_prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
704 omap2_prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
705 omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
706 if (omap_rev() > OMAP3430_REV_ES1_0) {
707 omap2_prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
708 omap2_prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
709 } else
710 omap2_prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
711
712 /*
713 * Enable interface clock autoidle for all modules.
714 * Note that in the long run this should be done by clockfw
715 */
716 omap2_cm_write_mod_reg(
717 OMAP3430_AUTO_MODEM_MASK |
718 OMAP3430ES2_AUTO_MMC3_MASK |
719 OMAP3430ES2_AUTO_ICR_MASK |
720 OMAP3430_AUTO_AES2_MASK |
721 OMAP3430_AUTO_SHA12_MASK |
722 OMAP3430_AUTO_DES2_MASK |
723 OMAP3430_AUTO_MMC2_MASK |
724 OMAP3430_AUTO_MMC1_MASK |
725 OMAP3430_AUTO_MSPRO_MASK |
726 OMAP3430_AUTO_HDQ_MASK |
727 OMAP3430_AUTO_MCSPI4_MASK |
728 OMAP3430_AUTO_MCSPI3_MASK |
729 OMAP3430_AUTO_MCSPI2_MASK |
730 OMAP3430_AUTO_MCSPI1_MASK |
731 OMAP3430_AUTO_I2C3_MASK |
732 OMAP3430_AUTO_I2C2_MASK |
733 OMAP3430_AUTO_I2C1_MASK |
734 OMAP3430_AUTO_UART2_MASK |
735 OMAP3430_AUTO_UART1_MASK |
736 OMAP3430_AUTO_GPT11_MASK |
737 OMAP3430_AUTO_GPT10_MASK |
738 OMAP3430_AUTO_MCBSP5_MASK |
739 OMAP3430_AUTO_MCBSP1_MASK |
740 OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
741 OMAP3430_AUTO_MAILBOXES_MASK |
742 OMAP3430_AUTO_OMAPCTRL_MASK |
743 OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
744 OMAP3430_AUTO_HSOTGUSB_MASK |
745 OMAP3430_AUTO_SAD2D_MASK |
746 OMAP3430_AUTO_SSI_MASK,
747 CORE_MOD, CM_AUTOIDLE1);
748
749 omap2_cm_write_mod_reg(
750 OMAP3430_AUTO_PKA_MASK |
751 OMAP3430_AUTO_AES1_MASK |
752 OMAP3430_AUTO_RNG_MASK |
753 OMAP3430_AUTO_SHA11_MASK |
754 OMAP3430_AUTO_DES1_MASK,
755 CORE_MOD, CM_AUTOIDLE2);
756
757 if (omap_rev() > OMAP3430_REV_ES1_0) {
758 omap2_cm_write_mod_reg(
759 OMAP3430_AUTO_MAD2D_MASK |
760 OMAP3430ES2_AUTO_USBTLL_MASK,
761 CORE_MOD, CM_AUTOIDLE3);
762 }
763
764 omap2_cm_write_mod_reg(
765 OMAP3430_AUTO_WDT2_MASK |
766 OMAP3430_AUTO_WDT1_MASK |
767 OMAP3430_AUTO_GPIO1_MASK |
768 OMAP3430_AUTO_32KSYNC_MASK |
769 OMAP3430_AUTO_GPT12_MASK |
770 OMAP3430_AUTO_GPT1_MASK,
771 WKUP_MOD, CM_AUTOIDLE);
772
773 omap2_cm_write_mod_reg(
774 OMAP3430_AUTO_DSS_MASK,
775 OMAP3430_DSS_MOD,
776 CM_AUTOIDLE);
777
778 omap2_cm_write_mod_reg(
779 OMAP3430_AUTO_CAM_MASK,
780 OMAP3430_CAM_MOD,
781 CM_AUTOIDLE);
782
783 omap2_cm_write_mod_reg(
784 omap3630_auto_uart4_mask |
785 OMAP3430_AUTO_GPIO6_MASK |
786 OMAP3430_AUTO_GPIO5_MASK |
787 OMAP3430_AUTO_GPIO4_MASK |
788 OMAP3430_AUTO_GPIO3_MASK |
789 OMAP3430_AUTO_GPIO2_MASK |
790 OMAP3430_AUTO_WDT3_MASK |
791 OMAP3430_AUTO_UART3_MASK |
792 OMAP3430_AUTO_GPT9_MASK |
793 OMAP3430_AUTO_GPT8_MASK |
794 OMAP3430_AUTO_GPT7_MASK |
795 OMAP3430_AUTO_GPT6_MASK |
796 OMAP3430_AUTO_GPT5_MASK |
797 OMAP3430_AUTO_GPT4_MASK |
798 OMAP3430_AUTO_GPT3_MASK |
799 OMAP3430_AUTO_GPT2_MASK |
800 OMAP3430_AUTO_MCBSP4_MASK |
801 OMAP3430_AUTO_MCBSP3_MASK |
802 OMAP3430_AUTO_MCBSP2_MASK,
803 OMAP3430_PER_MOD,
804 CM_AUTOIDLE);
805
806 if (omap_rev() > OMAP3430_REV_ES1_0) {
807 omap2_cm_write_mod_reg(
808 OMAP3430ES2_AUTO_USBHOST_MASK,
809 OMAP3430ES2_USBHOST_MOD,
810 CM_AUTOIDLE);
811 }
812
813 omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);
814
815 /*
816 * Set all plls to autoidle. This is needed until autoidle is
817 * enabled by clockfw
818 */
819 omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
820 OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
821 omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
822 MPU_MOD,
823 CM_AUTOIDLE2);
824 omap2_cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
825 (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
826 PLL_MOD,
827 CM_AUTOIDLE);
828 omap2_cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
829 PLL_MOD,
830 CM_AUTOIDLE2);
831
832 /*
833 * Enable control of the external oscillator through
834 * sys_clkreq. In the long run clock framework should
835 * take care of this.
836 */
837 omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
838 1 << OMAP_AUTOEXTCLKMODE_SHIFT,
839 OMAP3430_GR_MOD,
840 OMAP3_PRM_CLKSRC_CTRL_OFFSET);
841
842 /* Set up wakeup sources */
843 omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
844 OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
845 WKUP_MOD, PM_WKEN);
846 /* No need to write EN_IO, that is always enabled */
847 omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
848 OMAP3430_GRPSEL_GPT1_MASK |
849 OMAP3430_GRPSEL_GPT12_MASK,
850 WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
851 /* For some reason IO doesn't generate a wakeup event even if
852 * it is selected for the MPU wakeup group */
853 omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
854 OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
855
856 /* Enable PM_WKEN to support DSS LPR */
857 omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
858 OMAP3430_DSS_MOD, PM_WKEN);
859
860 /* Enable wakeups in PER */
861 omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
862 OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
863 OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
864 OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
865 OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
866 OMAP3430_EN_MCBSP4_MASK,
867 OMAP3430_PER_MOD, PM_WKEN);
868 /* and allow them to wake up MPU */
869 omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
870 OMAP3430_GRPSEL_GPIO2_MASK |
871 OMAP3430_GRPSEL_GPIO3_MASK |
872 OMAP3430_GRPSEL_GPIO4_MASK |
873 OMAP3430_GRPSEL_GPIO5_MASK |
874 OMAP3430_GRPSEL_GPIO6_MASK |
875 OMAP3430_GRPSEL_UART3_MASK |
876 OMAP3430_GRPSEL_MCBSP2_MASK |
877 OMAP3430_GRPSEL_MCBSP3_MASK |
878 OMAP3430_GRPSEL_MCBSP4_MASK,
879 OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
880
881 /* Don't attach IVA interrupts */
882 omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
883 omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
884 omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
885 omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
886
887 /* Clear any pending 'reset' flags */
888 omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
889 omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
890 omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
891 omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
892 omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
893 omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
894 omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);
895
896 /* Clear any pending PRCM interrupts */
897 omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
898
899 omap3_iva_idle();
900 omap3_d2d_idle();
901 }
902
903 void omap3_pm_off_mode_enable(int enable)
904 {
905 struct power_state *pwrst;
906 u32 state;
907
908 if (enable)
909 state = PWRDM_POWER_OFF;
910 else
911 state = PWRDM_POWER_RET;
912
913 #ifdef CONFIG_CPU_IDLE
914 /*
915 * Erratum i583: applies to ES revisions earlier than ES1.2 on 3630. We
916 * cannot enable OFF mode in a stable form on those revisions, so restrict
917 * to RET instead
918 */
919 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
920 omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
921 else
922 omap3_cpuidle_update_states(state, state);
923 #endif
924
925 list_for_each_entry(pwrst, &pwrst_list, node) {
926 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
927 pwrst->pwrdm == core_pwrdm &&
928 state == PWRDM_POWER_OFF) {
929 pwrst->next_state = PWRDM_POWER_RET;
930 WARN_ONCE(1,
931 "%s: Core OFF disabled due to errata i583\n",
932 __func__);
933 } else {
934 pwrst->next_state = state;
935 }
936 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
937 }
938 }
939
940 int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
941 {
942 struct power_state *pwrst;
943
944 list_for_each_entry(pwrst, &pwrst_list, node) {
945 if (pwrst->pwrdm == pwrdm)
946 return pwrst->next_state;
947 }
948 return -EINVAL;
949 }
950
951 int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
952 {
953 struct power_state *pwrst;
954
955 list_for_each_entry(pwrst, &pwrst_list, node) {
956 if (pwrst->pwrdm == pwrdm) {
957 pwrst->next_state = state;
958 return 0;
959 }
960 }
961 return -EINVAL;
962 }
963
964 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
965 {
966 struct power_state *pwrst;
967
968 if (!pwrdm->pwrsts)
969 return 0;
970
971 pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
972 if (!pwrst)
973 return -ENOMEM;
974 pwrst->pwrdm = pwrdm;
975 pwrst->next_state = PWRDM_POWER_RET;
976 list_add(&pwrst->node, &pwrst_list);
977
978 if (pwrdm_has_hdwr_sar(pwrdm))
979 pwrdm_enable_hdwr_sar(pwrdm);
980
981 return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
982 }
983
984 /*
985 * Enable hw supervised mode for all clockdomains if it's
986 * supported. Initiate sleep transition for other clockdomains, if
987 * they are not used
988 */
989 static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
990 {
991 if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
992 omap2_clkdm_allow_idle(clkdm);
993 else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
994 atomic_read(&clkdm->usecount) == 0)
995 omap2_clkdm_sleep(clkdm);
996 return 0;
997 }
998
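/* Copy the low-level suspend routine (and, on HS/EMU devices, the secure RAM save routine) into on-chip SRAM so it can run while SDRAM is in self-refresh */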
999 void omap_push_sram_idle(void)
1000 {
1001 _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
1002 omap34xx_cpu_suspend_sz);
1003 if (omap_type() != OMAP2_DEVICE_TYPE_GP)
1004 _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
1005 save_secure_ram_context_sz);
1006 }
1007
1008 static void __init pm_errata_configure(void)
1009 {
1010 if (cpu_is_omap3630()) {
1011 pm34xx_errata |= PM_RTA_ERRATUM_i608;
1012 /* Enable the l2 cache toggling in sleep logic */
1013 enable_omap3630_toggle_l2_on_restore();
1014 if (omap_rev() < OMAP3630_REV_ES1_2)
1015 pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
1016 }
1017 }
1018
1019 static int __init omap3_pm_init(void)
1020 {
1021 struct power_state *pwrst, *tmp;
1022 struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
1023 int ret;
1024
1025 if (!cpu_is_omap34xx())
1026 return -ENODEV;
1027
1028 pm_errata_configure();
1029
1030 printk(KERN_ERR "Power Management for TI OMAP3.\n");
1031
1032 /* XXX prcm_setup_regs needs to be before enabling hw
1033 * supervised mode for powerdomains */
1034 prcm_setup_regs();
1035
1036 ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
1037 (irq_handler_t)prcm_interrupt_handler,
1038 IRQF_DISABLED, "prcm", NULL);
1039 if (ret) {
1040 printk(KERN_ERR "request_irq failed to register for 0x%x\n",
1041 INT_34XX_PRCM_MPU_IRQ);
1042 goto err1;
1043 }
1044
1045 ret = pwrdm_for_each(pwrdms_setup, NULL);
1046 if (ret) {
1047 printk(KERN_ERR "Failed to setup powerdomains\n");
1048 goto err2;
1049 }
1050
1051 (void) clkdm_for_each(clkdms_setup, NULL);
1052
1053 mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
1054 if (mpu_pwrdm == NULL) {
1055 printk(KERN_ERR "Failed to get mpu_pwrdm\n");
1056 goto err2;
1057 }
1058
1059 neon_pwrdm = pwrdm_lookup("neon_pwrdm");
1060 per_pwrdm = pwrdm_lookup("per_pwrdm");
1061 core_pwrdm = pwrdm_lookup("core_pwrdm");
1062 cam_pwrdm = pwrdm_lookup("cam_pwrdm");
1063
1064 neon_clkdm = clkdm_lookup("neon_clkdm");
1065 mpu_clkdm = clkdm_lookup("mpu_clkdm");
1066 per_clkdm = clkdm_lookup("per_clkdm");
1067 core_clkdm = clkdm_lookup("core_clkdm");
1068
1069 omap_push_sram_idle();
1070 #ifdef CONFIG_SUSPEND
1071 suspend_set_ops(&omap_pm_ops);
1072 #endif /* CONFIG_SUSPEND */
1073
1074 pm_idle = omap3_pm_idle;
1075 omap3_idle_init();
1076
1077 /*
1078 * RTA is disabled during initialization as per erratum i608.
1079 * It is safer to disable RTA in the bootloader, but we would like
1080 * to be doubly sure here and prevent any mishaps.
1081 */
1082 if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
1083 omap3630_ctrl_disable_rta();
1084
1085 clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
1086 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
1087 omap3_secure_ram_storage =
1088 kmalloc(0x803F, GFP_KERNEL);
1089 if (!omap3_secure_ram_storage)
1090 printk(KERN_ERR "Memory allocation failed when "
1091 "allocating for secure sram context\n");
1092
1093 local_irq_disable();
1094 local_fiq_disable();
1095
1096 omap_dma_global_context_save();
1097 omap3_save_secure_ram_context(PWRDM_POWER_ON);
1098 omap_dma_global_context_restore();
1099
1100 local_irq_enable();
1101 local_fiq_enable();
1102 }
1103
1104 omap3_save_scratchpad_contents();
1105 err1:
1106 return ret;
1107 err2:
1108 free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
1109 list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
1110 list_del(&pwrst->node);
1111 kfree(pwrst);
1112 }
1113 return ret;
1114 }
1115
1116 late_initcall(omap3_pm_init);