ARM: SAMSUNG: Correct MIPI-CSIS io memory resource definition
[deliverable/linux.git] / arch / arm / mach-exynos / common.c
CommitLineData
cc511b8d
KK
1/*
2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Common Codes for EXYNOS
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/io.h>
7affca35 16#include <linux/device.h>
cc511b8d
KK
17#include <linux/gpio.h>
18#include <linux/sched.h>
19#include <linux/serial_core.h>
237c78be
AB
20#include <linux/of.h>
21#include <linux/of_irq.h>
cc511b8d
KK
22
23#include <asm/proc-fns.h>
40ba95fd 24#include <asm/exception.h>
cc511b8d
KK
25#include <asm/hardware/cache-l2x0.h>
26#include <asm/hardware/gic.h>
27#include <asm/mach/map.h>
28#include <asm/mach/irq.h>
b756a50f 29#include <asm/cacheflush.h>
cc511b8d
KK
30
31#include <mach/regs-irq.h>
32#include <mach/regs-pmu.h>
33#include <mach/regs-gpio.h>
b756a50f 34#include <mach/pmu.h>
cc511b8d
KK
35
36#include <plat/cpu.h>
37#include <plat/clock.h>
38#include <plat/devs.h>
39#include <plat/pm.h>
cc511b8d
KK
40#include <plat/sdhci.h>
41#include <plat/gpio-cfg.h>
42#include <plat/adc-core.h>
43#include <plat/fb-core.h>
44#include <plat/fimc-core.h>
45#include <plat/iic-core.h>
46#include <plat/tv-core.h>
47#include <plat/regs-serial.h>
48
49#include "common.h"
6cdeddcc
ADK
/* PL310 L2 cache auxiliary control value and the mask of bits we own. */
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff
cc511b8d 52
cc511b8d
KK
static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";

/*
 * SoC match table consumed by s3c_init_cpu(): the detected chip ID is
 * compared against idcode/idmask and the matching entry supplies the
 * per-SoC map_io/clock/uart init hooks.  All EXYNOS4 variants currently
 * share the same exynos4_* callbacks.
 */
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	},
};
84
/* Initial IO mappings */

/*
 * Static virtual->physical mappings common to all supported EXYNOS SoCs,
 * installed early by exynos_init_io() so that chip-ID detection, the
 * interrupt controllers and the UARTs are usable before ioremap() works.
 */
static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	},
};
145
/* EXYNOS4-specific static mappings, added on top of exynos_iodesc. */
static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO2,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO2),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO3,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO3),
		.length		= SZ_256,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
189
/* SYSRAM mapping for EXYNOS4210 rev 0 only (SYSRAM moved in later revs). */
static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
198
/* SYSRAM mapping for every other EXYNOS4 chip/revision. */
static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
207
/*
 * Default idle routine installed as pm_idle by exynos_init().
 * NOTE(review): per the pm_idle convention this appears to be entered with
 * interrupts disabled and must re-enable them before returning — confirm
 * against the arch idle loop of this kernel version.
 */
static void exynos_idle(void)
{
	if (!need_resched())
		cpu_do_idle();

	local_irq_enable();
}
215
/* Machine restart hook: trigger a software reset via the PMU SWRESET register. */
void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}
220
/*
 * exynos_map_io
 *
 * register the standard cpu IO areas
 */

/*
 * exynos_init_io - install the common (and optional machine-specific) static
 * IO mappings, then detect the SoC and dispatch to the matching cpu_ids entry.
 * @mach_desc: optional board-specific map_desc table (may be NULL)
 * @size: number of entries in @mach_desc
 */
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	/* match samsung_cpu_id against cpu_ids[] and run the map_io hook */
	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}
239
/*
 * exynos4_map_io - EXYNOS4 map_io hook from cpu_ids[].
 * Installs the EXYNOS4 static mappings (choosing the SYSRAM mapping by
 * chip revision) and renames early platform devices so the right drivers
 * bind to them.
 */
void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	/* only 4210 rev 0 uses the SYSRAM0 address */
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}
270
/*
 * exynos4_init_clocks - init_clocks hook from cpu_ids[].
 * @xtal: crystal oscillator rate in Hz, passed down to the base clocks.
 * Registers the common base clocks, then the 4210- or 4212/4412-specific
 * clock sets, and finally computes the clock tree rates.
 */
void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}
286
/* Register offsets within one interrupt-combiner group. */
#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

/* Serializes reads of COMBINER_INT_STATUS in the cascade handler. */
static DEFINE_SPINLOCK(irq_controller_lock);

/* Per-combiner state: Linux IRQ base, status-bit mask and MMIO base. */
struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
};

static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
300
301static inline void __iomem *combiner_base(struct irq_data *data)
302{
303 struct combiner_chip_data *combiner_data =
304 irq_data_get_irq_chip_data(data);
305
306 return combiner_data->base;
307}
308
309static void combiner_mask_irq(struct irq_data *data)
310{
311 u32 mask = 1 << (data->irq % 32);
312
313 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
314}
315
316static void combiner_unmask_irq(struct irq_data *data)
317{
318 u32 mask = 1 << (data->irq % 32);
319
320 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
321}
322
/*
 * Chained handler for a GIC SPI that fans out to one combiner group.
 * Reads the group's status bits, picks the lowest pending input and
 * re-dispatches it as its own Linux IRQ.  Only one pending input is
 * handled per invocation; the level-triggered parent refires for the rest.
 */
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	/* status register is shared by four groups; lock the read */
	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);

	/* translate the hardware bit to the group's Linux IRQ range */
	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
351
/* irq_chip for the combined interrupts; mask/unmask only, no ack needed. */
static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
};
357
358static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
359{
360 if (combiner_nr >= MAX_COMBINER_NR)
361 BUG();
362 if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
363 BUG();
364 irq_set_chained_handler(irq, combiner_handle_cascade_irq);
365}
366
367static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
368 unsigned int irq_start)
369{
370 unsigned int i;
371
372 if (combiner_nr >= MAX_COMBINER_NR)
373 BUG();
374
375 combiner_data[combiner_nr].base = base;
376 combiner_data[combiner_nr].irq_offset = irq_start;
377 combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
378
379 /* Disable all interrupts */
380
381 __raw_writel(combiner_data[combiner_nr].irq_mask,
382 base + COMBINER_ENABLE_CLEAR);
383
384 /* Setup the Linux IRQ subsystem */
385
386 for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
387 + MAX_IRQ_IN_COMBINER; i++) {
388 irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
389 irq_set_chip_data(i, &combiner_data[combiner_nr]);
390 set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
391 }
392}
393
237c78be
AB
#ifdef CONFIG_OF
/* DT match table: lets of_irq_init() probe the GIC from the device tree. */
static const struct of_device_id exynos4_dt_irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{},
};
#endif
cc511b8d
KK
400
/*
 * exynos4_init_irq - set up the GIC (statically or via DT) and cascade all
 * interrupt combiner groups behind their GIC SPIs.
 */
void __init exynos4_init_irq(void)
{
	int irq;
	unsigned int gic_bank_offset;

	/* per-CPU GIC banking stride differs on 4412 */
	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	if (!of_have_populated_dt())
		gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset);
#ifdef CONFIG_OF
	else
		of_irq_init(exynos4_dt_irq_match);
#endif

	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {

		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
				COMBINER_IRQ(irq, 0));
		combiner_cascade_irq(irq, IRQ_SPI(irq));
	}

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * Theses parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}
429
7affca35
LT
/* Sysfs subsystem ("exynos4-core") that exynos4_dev is registered under. */
struct bus_type exynos4_subsys = {
	.name		= "exynos4-core",
	.dev_name	= "exynos4-core",
};

/* Anchor device for the subsystem; registered from exynos_init(). */
static struct device exynos4_dev = {
	.bus	= &exynos4_subsys,
};
438
/* Register the exynos4-core subsystem early in boot (core_initcall). */
static int __init exynos4_core_init(void)
{
	return subsys_system_register(&exynos4_subsys, NULL);
}
core_initcall(exynos4_core_init);
444
#ifdef CONFIG_CACHE_L2X0
/*
 * exynos4_l2x0_cache_init - bring up the PL310 L2 cache.
 * Prefers DT-based init (l2x0_of_init); otherwise programs latency,
 * prefetch and power controls by hand before calling l2x0_init().
 * The programmed values are mirrored into l2x0_saved_regs, whose physical
 * address is handed to the resume path via l2x0_regs_phys, and both are
 * cleaned from the D-cache so the (MMU-off) resume code sees them.
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;
	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	/* only touch the controller if firmware has not enabled it yet */
	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}

early_initcall(exynos4_l2x0_cache_init);
#endif
495
/*
 * exynos_init - common .init hook from cpu_ids[]: install the idle routine
 * and register the subsystem anchor device.
 */
int __init exynos_init(void)
{
	printk(KERN_INFO "EXYNOS: Initializing architecture\n");

	/* set idle function */
	pm_idle = exynos_idle;

	return device_register(&exynos4_dev);
}
505
cc511b8d
KK
506/* uart registration process */
507
508void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
509{
510 struct s3c2410_uartcfg *tcfg = cfg;
511 u32 ucnt;
512
237c78be
AB
513 for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
514 tcfg->has_fracval = 1;
cc511b8d 515
237c78be 516 s3c24xx_init_uartdevs("exynos4210-uart", s5p_uart_resources, cfg, no);
cc511b8d
KK
517}
518
/* Protects the shared external-interrupt CON/MASK registers. */
static DEFINE_SPINLOCK(eint_lock);

/* Per-EINT Linux IRQ numbers, handed to the EINT0..15 chained handlers. */
static unsigned int eint0_15_data[16];
522
523static unsigned int exynos4_get_irq_nr(unsigned int number)
524{
525 u32 ret = 0;
526
527 switch (number) {
528 case 0 ... 3:
529 ret = (number + IRQ_EINT0);
530 break;
531 case 4 ... 7:
532 ret = (number + (IRQ_EINT4 - 4));
533 break;
534 case 8 ... 15:
535 ret = (number + (IRQ_EINT8 - 8));
536 break;
537 default:
538 printk(KERN_ERR "number available : %d\n", number);
539 }
540
541 return ret;
542}
543
544static inline void exynos4_irq_eint_mask(struct irq_data *data)
545{
546 u32 mask;
547
548 spin_lock(&eint_lock);
549 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
550 mask |= eint_irq_to_bit(data->irq);
551 __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
552 spin_unlock(&eint_lock);
553}
554
555static void exynos4_irq_eint_unmask(struct irq_data *data)
556{
557 u32 mask;
558
559 spin_lock(&eint_lock);
560 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
561 mask &= ~(eint_irq_to_bit(data->irq));
562 __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
563 spin_unlock(&eint_lock);
564}
565
566static inline void exynos4_irq_eint_ack(struct irq_data *data)
567{
568 __raw_writel(eint_irq_to_bit(data->irq),
569 S5P_EINT_PEND(EINT_REG_NR(data->irq)));
570}
571
/* irq_chip .irq_mask_ack: mask first, then ack, in that order. */
static void exynos4_irq_eint_maskack(struct irq_data *data)
{
	exynos4_irq_eint_mask(data);
	exynos4_irq_eint_ack(data);
}
577
/*
 * irq_chip .irq_set_type: program the 4-bit trigger field for one EINT in
 * its CON register (under eint_lock) and switch the pad to EINT mode.
 * Returns -EINVAL for unsupported trigger types.
 */
static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;

	default:
		printk(KERN_ERR "No such irq type %d", type);
		return -EINVAL;
	}

	/* each CON register packs eight EINTs, 4 bits apiece */
	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	spin_lock(&eint_lock);
	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);

	/* route the matching GPIO pad into EINT mode */
	switch (offs) {
	case 0 ... 7:
		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
		break;
	case 8 ... 15:
		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
		break;
	case 16 ... 23:
		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
		break;
	case 24 ... 31:
		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
		break;
	default:
		printk(KERN_ERR "No such irq number %d", offs);
	}

	return 0;
}
640
/* irq_chip for the 32 external interrupts (EINT0..31). */
static struct irq_chip exynos4_irq_eint = {
	.name		= "exynos4-eint",
	.irq_mask	= exynos4_irq_eint_mask,
	.irq_unmask	= exynos4_irq_eint_unmask,
	.irq_mask_ack	= exynos4_irq_eint_maskack,
	.irq_ack	= exynos4_irq_eint_ack,
	.irq_set_type	= exynos4_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
652
/*
 * exynos4_irq_demux_eint
 *
 * This function demuxes the IRQ from from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask registers handle eight of them.
 */
static inline void exynos4_irq_demux_eint(unsigned int start)
{
	unsigned int irq;

	u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));

	/* only consider pending-and-unmasked bits of this 8-wide bank */
	status &= ~mask;
	status &= 0xff;

	/* dispatch highest-numbered pending EINT first (fls order) */
	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}
678
/* Chained handler for the shared EINT16_31 parent: demux both 8-EINT banks. */
static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	chained_irq_enter(chip, desc);
	exynos4_irq_demux_eint(IRQ_EINT(16));
	exynos4_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}
687
/*
 * Chained handler for EINT0..15, which each have a dedicated parent IRQ.
 * The target Linux IRQ was stashed as handler data (see eint0_15_data[]);
 * the parent is masked/acked around the nested dispatch.
 */
static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	chip->irq_mask(&desc->irq_data);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	generic_handle_irq(*irq_data);

	chip->irq_unmask(&desc->irq_data);
	chained_irq_exit(chip, desc);
}
704
/*
 * exynos4_init_irq_eint - register the exynos4-eint chip for all 32 EINTs
 * and install the chained handlers: one shared parent for EINT16..31 and
 * one dedicated parent per EINT0..15.  Runs as an arch_initcall.
 */
int __init exynos4_init_irq_eint(void)
{
	int irq;

	for (irq = 0 ; irq <= 31 ; irq++) {
		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
	}

	irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);

	for (irq = 0 ; irq <= 15 ; irq++) {
		/* record the child IRQ so the chained handler can dispatch it */
		eint0_15_data[irq] = IRQ_EINT(irq);

		irq_set_handler_data(exynos4_get_irq_nr(irq),
				     &eint0_15_data[irq]);
		irq_set_chained_handler(exynos4_get_irq_nr(irq),
					exynos4_irq_eint0_15);
	}

	return 0;
}
arch_initcall(exynos4_init_irq_eint);
This page took 0.114982 seconds and 5 git commands to generate.