arch/arm/mach-shmobile/pm-sh7372.c
/*
 * sh7372 Power management support
 *
 * Copyright (C) 2011 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/bitrev.h>
#include <linux/console.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/suspend.h>
#include <mach/common.h>
#include <mach/sh7372.h>

/* DBG */
#define DBGREG1 0xe6100020
#define DBGREG9 0xe6100040

/* CPGA */
#define SYSTBCR 0xe6150024
#define MSTPSR0 0xe6150030
#define MSTPSR1 0xe6150038
#define MSTPSR2 0xe6150040
#define MSTPSR3 0xe6150048
#define MSTPSR4 0xe615004c
#define PLLC01STPCR 0xe61500c8

/* SYSC */
#define SPDCR 0xe6180008
#define SWUCR 0xe6180014
#define SBAR 0xe6180020
#define WUPRMSK 0xe6180028
#define WUPSMSK 0xe618002c
#define WUPSMSK2 0xe6180048
#define PSTR 0xe6180080
#define WUPSFAC 0xe6180098
#define IRQCR 0xe618022c
#define IRQCR2 0xe6180238
#define IRQCR3 0xe6180244
#define IRQCR4 0xe6180248
#define PDNSEL 0xe6180254

/* INTC */
#define ICR1A 0xe6900000
#define ICR2A 0xe6900004
#define ICR3A 0xe6900008
#define ICR4A 0xe690000c
#define INTMSK00A 0xe6900040
#define INTMSK10A 0xe6900044
#define INTMSK20A 0xe6900048
#define INTMSK30A 0xe690004c

/* MFIS */
#define SMFRAM 0xe6a70000

/* AP-System Core */
#define APARMBAREA 0xe6f10020

#define PSTR_RETRIES 100
#define PSTR_DELAY_US 10

#ifdef CONFIG_PM

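/*
 * Power a domain down: if the area is currently powered (its bit is set
 * in PSTR), request power-off by writing the bit to SPDCR and poll until
 * the hardware clears it.  An optional per-domain suspend hook runs first
 * and may veto the transition by returning an error.
 */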
static int pd_power_down(struct generic_pm_domain *genpd)
{
	struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
	unsigned int mask = 1 << sh7372_pd->bit_shift;

	if (sh7372_pd->suspend) {
		int ret = sh7372_pd->suspend();

		if (ret)
			return ret;
	}

	if (__raw_readl(PSTR) & mask) {
		unsigned int retry_count;

		__raw_writel(mask, SPDCR);

		for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
			if (!(__raw_readl(SPDCR) & mask))
				break;
			cpu_relax();
		}
	}

	if (!sh7372_pd->no_debug)
		pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
			 genpd->name, mask, __raw_readl(PSTR));

	return 0;
}

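/*
 * Power a domain up: write the area bit to SWUCR and poll for completion,
 * using udelay() for the first PSTR_RETRIES iterations and cpu_relax()
 * afterwards.  The optional per-domain resume hook is only invoked when
 * do_resume is set and the power-up succeeded.
 */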
static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
{
	unsigned int mask = 1 << sh7372_pd->bit_shift;
	unsigned int retry_count;
	int ret = 0;

	if (__raw_readl(PSTR) & mask)
		goto out;

	__raw_writel(mask, SWUCR);

	for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
		if (!(__raw_readl(SWUCR) & mask))
			break;
		if (retry_count > PSTR_RETRIES)
			udelay(PSTR_DELAY_US);
		else
			cpu_relax();
	}
	if (!retry_count)
		ret = -EIO;

	if (!sh7372_pd->no_debug)
		pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
			 sh7372_pd->genpd.name, mask, __raw_readl(PSTR));

out:
	if (ret == 0 && sh7372_pd->resume && do_resume)
		sh7372_pd->resume();

	return ret;
}

static int pd_power_up(struct generic_pm_domain *genpd)
{
	return __pd_power_up(to_sh7372_pd(genpd), true);
}

static int sh7372_a4r_suspend(void)
{
	sh7372_intcs_suspend();
	__raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
	return 0;
}

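/*
 * Per-device genpd callbacks: stop/start gate the device clocks through
 * the pm_clk helpers and chain the optional device-specific hooks from
 * dev_gpd_data(); active_wakeup defaults to true when no hook is set.
 */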
static bool pd_active_wakeup(struct device *dev)
{
	bool (*active_wakeup)(struct device *dev);

	active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
	return active_wakeup ? active_wakeup(dev) : true;
}

static int sh7372_stop_dev(struct device *dev)
{
	int (*stop)(struct device *dev);

	stop = dev_gpd_data(dev)->ops.stop;
	if (stop) {
		int ret = stop(dev);
		if (ret)
			return ret;
	}
	return pm_clk_suspend(dev);
}

static int sh7372_start_dev(struct device *dev)
{
	int (*start)(struct device *dev);
	int ret;

	ret = pm_clk_resume(dev);
	if (ret)
		return ret;

	start = dev_gpd_data(dev)->ops.start;
	if (start)
		ret = start(dev);

	return ret;
}

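/*
 * Register a sh7372 power domain with the generic PM domain framework:
 * the domain-specific governor is used when one is set (simple_qos_governor
 * otherwise), the dev_ops above are hooked up, and the domain is powered
 * up without running its resume hook so it starts out in a known state.
 */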
void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
{
	struct generic_pm_domain *genpd = &sh7372_pd->genpd;
	struct dev_power_governor *gov = sh7372_pd->gov;

	pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
	genpd->dev_ops.stop = sh7372_stop_dev;
	genpd->dev_ops.start = sh7372_start_dev;
	genpd->dev_ops.active_wakeup = pd_active_wakeup;
	genpd->dev_irq_safe = true;
	genpd->power_off = pd_power_down;
	genpd->power_on = pd_power_up;
	__pd_power_up(sh7372_pd, false);
}

void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
				 struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_genpd_add_device(&sh7372_pd->genpd, dev);
	if (pm_clk_no_clocks(dev))
		pm_clk_add(dev, NULL);
}

void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
			     struct sh7372_pm_domain *sh7372_sd)
{
	pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd);
}

struct sh7372_pm_domain sh7372_a4lc = {
	.genpd.name = "A4LC",
	.bit_shift = 1,
};

struct sh7372_pm_domain sh7372_a4mp = {
	.genpd.name = "A4MP",
	.bit_shift = 2,
};

struct sh7372_pm_domain sh7372_d4 = {
	.genpd.name = "D4",
	.bit_shift = 3,
};

struct sh7372_pm_domain sh7372_a4r = {
	.genpd.name = "A4R",
	.bit_shift = 5,
	.suspend = sh7372_a4r_suspend,
	.resume = sh7372_intcs_resume,
};

struct sh7372_pm_domain sh7372_a3rv = {
	.genpd.name = "A3RV",
	.bit_shift = 6,
};

struct sh7372_pm_domain sh7372_a3ri = {
	.genpd.name = "A3RI",
	.bit_shift = 8,
};

static int sh7372_a4s_suspend(void)
{
	/*
	 * The A4S domain contains the CPU core and therefore it should
	 * only be turned off if the CPU is not in use.
	 */
	return -EBUSY;
}

struct sh7372_pm_domain sh7372_a4s = {
	.genpd.name = "A4S",
	.bit_shift = 10,
	.gov = &pm_domain_always_on_gov,
	.no_debug = true,
	.suspend = sh7372_a4s_suspend,
};

static int sh7372_a3sp_suspend(void)
{
	/*
	 * Serial consoles make use of SCIF hardware located in A3SP,
	 * keep such power domain on if "no_console_suspend" is set.
	 */
	return console_suspend_enabled ? -EBUSY : 0;
}

struct sh7372_pm_domain sh7372_a3sp = {
	.genpd.name = "A3SP",
	.bit_shift = 11,
	.gov = &pm_domain_always_on_gov,
	.no_debug = true,
	.suspend = sh7372_a3sp_suspend,
};

struct sh7372_pm_domain sh7372_a3sg = {
	.genpd.name = "A3SG",
	.bit_shift = 13,
};

#else /* !CONFIG_PM */

static inline void sh7372_a3sp_init(void) {}

#endif /* !CONFIG_PM */

#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
static int sh7372_do_idle_core_standby(unsigned long unused)
{
	cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
	return 0;
}

static void sh7372_set_reset_vector(unsigned long address)
{
	/* set reset vector, translate 4k */
	__raw_writel(address, SBAR);
	__raw_writel(0, APARMBAREA);
}

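/*
 * Core Standby: point the SYSC reset vector at the resume code, set
 * SYSTBCR to 0x10 so that the WFI issued via cpu_suspend() enters Core
 * Standby instead of plain sleep, then undo both settings on wakeup.
 */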
static void sh7372_enter_core_standby(void)
{
	sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));

	/* enter sleep mode with SYSTBCR to 0x10 */
	__raw_writel(0x10, SYSTBCR);
	cpu_suspend(0, sh7372_do_idle_core_standby);
	__raw_writel(0, SYSTBCR);

	/* disable reset vector translation */
	__raw_writel(0, SBAR);
}
#endif

#ifdef CONFIG_SUSPEND
static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
{
	if (pllc0_on)
		__raw_writel(0, PLLC01STPCR);
	else
		__raw_writel(1 << 28, PLLC01STPCR);

	__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
	cpu_suspend(sleep_mode, sh7372_do_idle_sysc);
	__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */

	/* disable reset vector translation */
	__raw_writel(0, SBAR);
}

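/*
 * Inspect the MSTPSR module stop status registers to decide whether SYSC
 * sleep can be entered: if any module outside the expected set still has
 * its clock running, return 0 so the caller falls back to Core Standby.
 * Otherwise build bitmaps of the wakeup sources (SPU2, MFI, KEYSC, CMT0-2)
 * that need to stay unmasked in WUPSMSK/WUPSMSK2.
 */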
static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p)
{
	unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
	unsigned long msk, msk2;

	/* check active clocks to determine potential wakeup sources */

	mstpsr0 = __raw_readl(MSTPSR0);
	if ((mstpsr0 & 0x00000003) != 0x00000003) {
		pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0);
		return 0;
	}

	mstpsr1 = __raw_readl(MSTPSR1);
	if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) {
		pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1);
		return 0;
	}

	mstpsr2 = __raw_readl(MSTPSR2);
	if ((mstpsr2 & 0x000741ff) != 0x000741ff) {
		pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2);
		return 0;
	}

	mstpsr3 = __raw_readl(MSTPSR3);
	if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) {
		pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3);
		return 0;
	}

	mstpsr4 = __raw_readl(MSTPSR4);
	if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) {
		pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4);
		return 0;
	}

	msk = 0;
	msk2 = 0;

	/* make bitmaps of limited number of wakeup sources */

	if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */
		msk |= 1 << 31;

	if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */
		msk |= 1 << 21;

	if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */
		msk |= 1 << 2;

	if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */
		msk |= 1 << 1;

	if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */
		msk |= 1 << 1;

	if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */
		msk |= 1 << 1;

	if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */
		msk2 |= 1 << 17;

	*mskp = msk;
	*msk2p = msk2;

	return 1;
}

static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
{
	u16 tmp, irqcr1, irqcr2;
	int k;

	irqcr1 = 0;
	irqcr2 = 0;

	/* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */
	for (k = 0; k <= 7; k++) {
		tmp = (icr >> ((7 - k) * 4)) & 0xf;
		irqcr1 |= (tmp & 0x03) << (k * 2);
		irqcr2 |= (tmp >> 2) << (k * 2);
	}

	*irqcr1p = irqcr1;
	*irqcr2p = irqcr2;
}

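/*
 * Translate the current INTC external IRQ configuration into the SYSC
 * wakeup registers: the IRQ0A-IRQ31A masks feed WUPSMSK/WUPSMSK2 together
 * with the clock-derived bitmaps, and the ICR sense settings are copied
 * into IRQCR-IRQCR4 so the edge/level triggers are honored during SYSC sleep.
 */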
static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
{
	u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
	unsigned long tmp;

	/* read IRQ0A -> IRQ15A mask */
	tmp = bitrev8(__raw_readb(INTMSK00A));
	tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8;

	/* setup WUPSMSK from clocks and external IRQ mask */
	msk = (~msk & 0xc030000f) | (tmp << 4);
	__raw_writel(msk, WUPSMSK);

	/* propagate level/edge trigger for external IRQ 0->15 */
	sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low);
	sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high);
	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR);
	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2);

	/* read IRQ16A -> IRQ31A mask */
	tmp = bitrev8(__raw_readb(INTMSK20A));
	tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8;

	/* setup WUPSMSK2 from clocks and external IRQ mask */
	msk2 = (~msk2 & 0x00030000) | tmp;
	__raw_writel(msk2, WUPSMSK2);

	/* propagate level/edge trigger for external IRQ 16->31 */
	sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low);
	sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high);
	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
}

static void sh7372_enter_a3sm_common(int pllc0_on)
{
	sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
	sh7372_enter_sysc(pllc0_on, 1 << 12);
}

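/*
 * A4S sleep: the resume code is copied into the on-chip SMFRAM and the
 * reset vector is pointed at that copy before entering SYSC sleep with
 * bit 10 set; INTCA state is saved and restored around the transition.
 */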
static void sh7372_enter_a4s_common(int pllc0_on)
{
	sh7372_intca_suspend();
	memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
	sh7372_set_reset_vector(SMFRAM);
	sh7372_enter_sysc(pllc0_on, 1 << 10);
	sh7372_intca_resume();
}

#endif

#ifdef CONFIG_CPU_IDLE

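/*
 * Append a "C2" Core Standby state to the shared shmobile cpuidle driver;
 * the matching enter function is registered in shmobile_cpuidle_modes.
 */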
static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[drv->state_count];

	snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
	strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
	state->exit_latency = 10;
	state->target_residency = 20 + 10;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;

	drv->state_count++;
}

static void sh7372_cpuidle_init(void)
{
	shmobile_cpuidle_setup = sh7372_cpuidle_setup;
}
#else
static void sh7372_cpuidle_init(void) {}
#endif

#ifdef CONFIG_SUSPEND

static int sh7372_enter_suspend(suspend_state_t suspend_state)
{
	unsigned long msk, msk2;

	/* check active clocks to determine potential wakeup sources */
	if (sh7372_sysc_valid(&msk, &msk2)) {
		/* convert INTC mask and sense to SYSC mask and sense */
		sh7372_setup_sysc(msk, msk2);

		if (!console_suspend_enabled &&
		    sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) {
			/* enter A4S sleep with PLLC0 off */
			pr_debug("entering A4S\n");
			sh7372_enter_a4s_common(0);
		} else {
			/* enter A3SM sleep with PLLC0 off */
			pr_debug("entering A3SM\n");
			sh7372_enter_a3sm_common(0);
		}
	} else {
		/* default to Core Standby that supports all wakeup sources */
		pr_debug("entering Core Standby\n");
		sh7372_enter_core_standby();
	}
	return 0;
}

/**
 * sh7372_pm_notifier_fn - SH7372 PM notifier routine.
 * @notifier: Unused.
 * @pm_event: Event being handled.
 * @unused: Unused.
 */
static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
				 unsigned long pm_event, void *unused)
{
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		/*
		 * This is necessary, because the A4R domain has to be "on"
		 * when suspend_device_irqs() and resume_device_irqs() are
		 * executed during system suspend and resume, respectively, so
		 * that those functions don't crash while accessing the INTCS.
		 */
		pm_genpd_poweron(&sh7372_a4r.genpd);
		break;
	case PM_POST_SUSPEND:
		pm_genpd_poweroff_unused();
		break;
	}

	return NOTIFY_DONE;
}

static void sh7372_suspend_init(void)
{
	shmobile_suspend_ops.enter = sh7372_enter_suspend;
	pm_notifier(sh7372_pm_notifier_fn, 0);
}
#else
static void sh7372_suspend_init(void) {}
#endif

void __init sh7372_pm_init(void)
{
	/* enable DBG hardware block to kick SYSC */
	__raw_writel(0x0000a500, DBGREG9);
	__raw_writel(0x0000a501, DBGREG9);
	__raw_writel(0x00000000, DBGREG1);

	/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
	__raw_writel(0, PDNSEL);

	sh7372_suspend_init();
	sh7372_cpuidle_init();
}