Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[deliverable/linux.git] / arch / arm / mach-davinci / cp_intc.c
1 /*
2 * TI Common Platform Interrupt Controller (cp_intc) driver
3 *
4 * Author: Steve Chen <schen@mvista.com>
5 * Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
6 *
7 * This file is licensed under the terms of the GNU General Public License
8 * version 2. This program is licensed "as is" without any warranty of any
9 * kind, whether express or implied.
10 */
11
12 #include <linux/export.h>
13 #include <linux/init.h>
14 #include <linux/irq.h>
15 #include <linux/irqdomain.h>
16 #include <linux/io.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_irq.h>
20
21 #include <mach/common.h>
22 #include "cp_intc.h"
23
/* Read a 32-bit cp_intc register at the given byte offset. */
static inline unsigned int cp_intc_read(unsigned offset)
{
	return __raw_readl(davinci_intc_base + offset);
}
28
/* Write a 32-bit value to the cp_intc register at the given byte offset. */
static inline void cp_intc_write(unsigned long value, unsigned offset)
{
	__raw_writel(value, davinci_intc_base + offset);
}
33
/*
 * Acknowledge an interrupt: writing the system interrupt number to
 * SYS_STAT_IDX_CLR clears that interrupt's pending status bit.
 */
static void cp_intc_ack_irq(struct irq_data *d)
{
	cp_intc_write(d->hwirq, CP_INTC_SYS_STAT_IDX_CLR);
}
38
/*
 * Disable (mask) one system interrupt.
 *
 * Host channel 1 (nIRQ, per the write of 1 to HOST_ENABLE_IDX_SET during
 * init) is disabled around the system-interrupt disable; the original
 * author was unsure why this bracketing is needed — keep the sequence.
 */
static void cp_intc_mask_irq(struct irq_data *d)
{
	/* XXX don't know why we need to disable nIRQ here... */
	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_CLR);
	cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_CLR);
	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
}
47
/* Enable (unmask) one system interrupt via the indexed enable-set register. */
static void cp_intc_unmask_irq(struct irq_data *d)
{
	cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_SET);
}
53
54 static int cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type)
55 {
56 unsigned reg = BIT_WORD(d->hwirq);
57 unsigned mask = BIT_MASK(d->hwirq);
58 unsigned polarity = cp_intc_read(CP_INTC_SYS_POLARITY(reg));
59 unsigned type = cp_intc_read(CP_INTC_SYS_TYPE(reg));
60
61 switch (flow_type) {
62 case IRQ_TYPE_EDGE_RISING:
63 polarity |= mask;
64 type |= mask;
65 break;
66 case IRQ_TYPE_EDGE_FALLING:
67 polarity &= ~mask;
68 type |= mask;
69 break;
70 case IRQ_TYPE_LEVEL_HIGH:
71 polarity |= mask;
72 type &= ~mask;
73 break;
74 case IRQ_TYPE_LEVEL_LOW:
75 polarity &= ~mask;
76 type &= ~mask;
77 break;
78 default:
79 return -EINVAL;
80 }
81
82 cp_intc_write(polarity, CP_INTC_SYS_POLARITY(reg));
83 cp_intc_write(type, CP_INTC_SYS_TYPE(reg));
84
85 return 0;
86 }
87
/*
 * irq_chip callbacks for cp_intc. IRQCHIP_SKIP_SET_WAKE lets the core
 * treat set_wake as a successful no-op rather than failing suspend.
 */
static struct irq_chip cp_intc_irq_chip = {
	.name		= "cp_intc",
	.irq_ack	= cp_intc_ack_irq,
	.irq_mask	= cp_intc_mask_irq,
	.irq_unmask	= cp_intc_unmask_irq,
	.irq_set_type	= cp_intc_set_irq_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE,
};
96
/* irq domain covering all cp_intc system interrupts; set in cp_intc_of_init() */
static struct irq_domain *cp_intc_domain;
98
99 static int cp_intc_host_map(struct irq_domain *h, unsigned int virq,
100 irq_hw_number_t hw)
101 {
102 pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);
103
104 irq_set_chip(virq, &cp_intc_irq_chip);
105 irq_set_probe(virq);
106 irq_set_handler(virq, handle_edge_irq);
107 return 0;
108 }
109
/*
 * Domain ops: onetwocell xlate accepts one-cell (hwirq) or two-cell
 * (hwirq, flags) interrupt specifiers from DT.
 */
static const struct irq_domain_ops cp_intc_host_ops = {
	.map = cp_intc_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};
114
115 int __init cp_intc_of_init(struct device_node *node, struct device_node *parent)
116 {
117 u32 num_irq = davinci_soc_info.intc_irq_num;
118 u8 *irq_prio = davinci_soc_info.intc_irq_prios;
119 u32 *host_map = davinci_soc_info.intc_host_map;
120 unsigned num_reg = BITS_TO_LONGS(num_irq);
121 int i, irq_base;
122
123 davinci_intc_type = DAVINCI_INTC_TYPE_CP_INTC;
124 if (node) {
125 davinci_intc_base = of_iomap(node, 0);
126 if (of_property_read_u32(node, "ti,intc-size", &num_irq))
127 pr_warn("unable to get intc-size, default to %d\n",
128 num_irq);
129 } else {
130 davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_8K);
131 }
132 if (WARN_ON(!davinci_intc_base))
133 return -EINVAL;
134
135 cp_intc_write(0, CP_INTC_GLOBAL_ENABLE);
136
137 /* Disable all host interrupts */
138 cp_intc_write(0, CP_INTC_HOST_ENABLE(0));
139
140 /* Disable system interrupts */
141 for (i = 0; i < num_reg; i++)
142 cp_intc_write(~0, CP_INTC_SYS_ENABLE_CLR(i));
143
144 /* Set to normal mode, no nesting, no priority hold */
145 cp_intc_write(0, CP_INTC_CTRL);
146 cp_intc_write(0, CP_INTC_HOST_CTRL);
147
148 /* Clear system interrupt status */
149 for (i = 0; i < num_reg; i++)
150 cp_intc_write(~0, CP_INTC_SYS_STAT_CLR(i));
151
152 /* Enable nIRQ (what about nFIQ?) */
153 cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
154
155 /*
156 * Priority is determined by host channel: lower channel number has
157 * higher priority i.e. channel 0 has highest priority and channel 31
158 * had the lowest priority.
159 */
160 num_reg = (num_irq + 3) >> 2; /* 4 channels per register */
161 if (irq_prio) {
162 unsigned j, k;
163 u32 val;
164
165 for (k = i = 0; i < num_reg; i++) {
166 for (val = j = 0; j < 4; j++, k++) {
167 val >>= 8;
168 if (k < num_irq)
169 val |= irq_prio[k] << 24;
170 }
171
172 cp_intc_write(val, CP_INTC_CHAN_MAP(i));
173 }
174 } else {
175 /*
176 * Default everything to channel 15 if priority not specified.
177 * Note that channel 0-1 are mapped to nFIQ and channels 2-31
178 * are mapped to nIRQ.
179 */
180 for (i = 0; i < num_reg; i++)
181 cp_intc_write(0x0f0f0f0f, CP_INTC_CHAN_MAP(i));
182 }
183
184 if (host_map)
185 for (i = 0; host_map[i] != -1; i++)
186 cp_intc_write(host_map[i], CP_INTC_HOST_MAP(i));
187
188 irq_base = irq_alloc_descs(-1, 0, num_irq, 0);
189 if (irq_base < 0) {
190 pr_warn("Couldn't allocate IRQ numbers\n");
191 irq_base = 0;
192 }
193
194 /* create a legacy host */
195 cp_intc_domain = irq_domain_add_legacy(node, num_irq,
196 irq_base, 0, &cp_intc_host_ops, NULL);
197
198 if (!cp_intc_domain) {
199 pr_err("cp_intc: failed to allocate irq host!\n");
200 return -EINVAL;
201 }
202
203 /* Enable global interrupt */
204 cp_intc_write(1, CP_INTC_GLOBAL_ENABLE);
205
206 return 0;
207 }
208
/* Legacy (non-DT) entry point: initialize cp_intc from davinci_soc_info. */
void __init cp_intc_init(void)
{
	cp_intc_of_init(NULL, NULL);
}
This page took 0.035819 seconds and 5 git commands to generate.