Commit | Line | Data |
---|---|---|
8e4bebe0 HZ |
1 | /* |
2 | * Hisilicon HiP04 INTC | |
3 | * | |
4 | * Copyright (C) 2002-2014 ARM Limited. | |
5 | * Copyright (c) 2013-2014 Hisilicon Ltd. | |
6 | * Copyright (c) 2013-2014 Linaro Ltd. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * Interrupt architecture for the HIP04 INTC: | |
13 | * | |
14 | * o There is one Interrupt Distributor, which receives interrupts | |
15 | * from system devices and sends them to the Interrupt Controllers. | |
16 | * | |
17 | * o There is one CPU Interface per CPU, which sends interrupts sent | |
18 | * by the Distributor, and interrupts generated locally, to the | |
19 | * associated CPU. The base address of the CPU interface is usually | |
20 | * aliased so that the same address points to different chips depending | |
21 | * on the CPU it is accessed from. | |
22 | * | |
23 | * Note that IRQs 0-31 are special - they are local to each CPU. | |
24 | * As such, the enable set/clear, pending set/clear and active bit | |
25 | * registers are banked per-cpu for these sources. | |
26 | */ | |
27 | ||
28 | #include <linux/init.h> | |
29 | #include <linux/kernel.h> | |
30 | #include <linux/err.h> | |
31 | #include <linux/module.h> | |
32 | #include <linux/list.h> | |
33 | #include <linux/smp.h> | |
34 | #include <linux/cpu.h> | |
35 | #include <linux/cpu_pm.h> | |
36 | #include <linux/cpumask.h> | |
37 | #include <linux/io.h> | |
38 | #include <linux/of.h> | |
39 | #include <linux/of_address.h> | |
40 | #include <linux/of_irq.h> | |
41 | #include <linux/irqdomain.h> | |
42 | #include <linux/interrupt.h> | |
43 | #include <linux/slab.h> | |
44 | #include <linux/irqchip/arm-gic.h> | |
45 | ||
46 | #include <asm/irq.h> | |
47 | #include <asm/exception.h> | |
48 | #include <asm/smp_plat.h> | |
49 | ||
50 | #include "irq-gic-common.h" | |
51 | #include "irqchip.h" | |
52 | ||
53 | #define HIP04_MAX_IRQS 510 | |
54 | ||
55 | struct hip04_irq_data { | |
56 | void __iomem *dist_base; | |
57 | void __iomem *cpu_base; | |
58 | struct irq_domain *domain; | |
59 | unsigned int nr_irqs; | |
60 | }; | |
61 | ||
62 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); | |
63 | ||
64 | /* | |
65 | * The GIC mapping of CPU interfaces does not necessarily match | |
66 | * the logical CPU numbering. Let's use a mapping as returned | |
67 | * by the GIC itself. | |
68 | */ | |
69 | #define NR_HIP04_CPU_IF 16 | |
70 | static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly; | |
71 | ||
72 | static struct hip04_irq_data hip04_data __read_mostly; | |
73 | ||
74 | static inline void __iomem *hip04_dist_base(struct irq_data *d) | |
75 | { | |
76 | struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d); | |
77 | return hip04_data->dist_base; | |
78 | } | |
79 | ||
80 | static inline void __iomem *hip04_cpu_base(struct irq_data *d) | |
81 | { | |
82 | struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d); | |
83 | return hip04_data->cpu_base; | |
84 | } | |
85 | ||
86 | static inline unsigned int hip04_irq(struct irq_data *d) | |
87 | { | |
88 | return d->hwirq; | |
89 | } | |
90 | ||
91 | /* | |
92 | * Routines to acknowledge, disable and enable interrupts | |
93 | */ | |
94 | static void hip04_mask_irq(struct irq_data *d) | |
95 | { | |
96 | u32 mask = 1 << (hip04_irq(d) % 32); | |
97 | ||
98 | raw_spin_lock(&irq_controller_lock); | |
99 | writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR + | |
100 | (hip04_irq(d) / 32) * 4); | |
101 | raw_spin_unlock(&irq_controller_lock); | |
102 | } | |
103 | ||
104 | static void hip04_unmask_irq(struct irq_data *d) | |
105 | { | |
106 | u32 mask = 1 << (hip04_irq(d) % 32); | |
107 | ||
108 | raw_spin_lock(&irq_controller_lock); | |
109 | writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET + | |
110 | (hip04_irq(d) / 32) * 4); | |
111 | raw_spin_unlock(&irq_controller_lock); | |
112 | } | |
113 | ||
114 | static void hip04_eoi_irq(struct irq_data *d) | |
115 | { | |
116 | writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI); | |
117 | } | |
118 | ||
119 | static int hip04_irq_set_type(struct irq_data *d, unsigned int type) | |
120 | { | |
121 | void __iomem *base = hip04_dist_base(d); | |
122 | unsigned int irq = hip04_irq(d); | |
fb7e7deb | 123 | int ret; |
8e4bebe0 HZ |
124 | |
125 | /* Interrupt configuration for SGIs can't be changed */ | |
126 | if (irq < 16) | |
127 | return -EINVAL; | |
128 | ||
fb7e7deb LD |
129 | /* SPIs have restrictions on the supported types */ |
130 | if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && | |
131 | type != IRQ_TYPE_EDGE_RISING) | |
8e4bebe0 HZ |
132 | return -EINVAL; |
133 | ||
134 | raw_spin_lock(&irq_controller_lock); | |
135 | ||
fb7e7deb | 136 | ret = gic_configure_irq(irq, type, base, NULL); |
8e4bebe0 HZ |
137 | |
138 | raw_spin_unlock(&irq_controller_lock); | |
139 | ||
fb7e7deb | 140 | return ret; |
8e4bebe0 HZ |
141 | } |
142 | ||
#ifdef CONFIG_SMP
/*
 * Retarget @d to one CPU chosen from @mask_val by rewriting the
 * relevant 16-bit field of the distributor target register (two
 * fields per 32-bit word on this controller).
 */
static int hip04_irq_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	unsigned int shift = (hip04_irq(d) % 2) * 16;
	unsigned int cpu;
	void __iomem *reg;
	u32 val, mask, bit;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
	/*
	 * Unsigned operands: when shift == 16, shifting the signed int
	 * 0xffff (or the int-promoted u16 map entry) left by 16 would
	 * overflow into the sign bit, which is undefined behaviour.
	 */
	mask = 0xffffU << shift;
	bit = (u32)hip04_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
171 | ||
172 | static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs) | |
173 | { | |
174 | u32 irqstat, irqnr; | |
175 | void __iomem *cpu_base = hip04_data.cpu_base; | |
176 | ||
177 | do { | |
178 | irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); | |
179 | irqnr = irqstat & GICC_IAR_INT_ID_MASK; | |
180 | ||
181 | if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) { | |
3fe14927 | 182 | handle_domain_irq(hip04_data.domain, irqnr, regs); |
8e4bebe0 HZ |
183 | continue; |
184 | } | |
185 | if (irqnr < 16) { | |
186 | writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); | |
187 | #ifdef CONFIG_SMP | |
188 | handle_IPI(irqnr, regs); | |
189 | #endif | |
190 | continue; | |
191 | } | |
192 | break; | |
193 | } while (1); | |
194 | } | |
195 | ||
196 | static struct irq_chip hip04_irq_chip = { | |
197 | .name = "HIP04 INTC", | |
198 | .irq_mask = hip04_mask_irq, | |
199 | .irq_unmask = hip04_unmask_irq, | |
200 | .irq_eoi = hip04_eoi_irq, | |
201 | .irq_set_type = hip04_irq_set_type, | |
202 | #ifdef CONFIG_SMP | |
203 | .irq_set_affinity = hip04_irq_set_affinity, | |
204 | #endif | |
55963c9f | 205 | .flags = IRQCHIP_SET_TYPE_MASKED, |
8e4bebe0 HZ |
206 | }; |
207 | ||
208 | static u16 hip04_get_cpumask(struct hip04_irq_data *intc) | |
209 | { | |
210 | void __iomem *base = intc->dist_base; | |
211 | u32 mask, i; | |
212 | ||
213 | for (i = mask = 0; i < 32; i += 2) { | |
214 | mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2); | |
215 | mask |= mask >> 16; | |
216 | if (mask) | |
217 | break; | |
218 | } | |
219 | ||
220 | if (!mask) | |
221 | pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); | |
222 | ||
223 | return mask; | |
224 | } | |
225 | ||
226 | static void __init hip04_irq_dist_init(struct hip04_irq_data *intc) | |
227 | { | |
228 | unsigned int i; | |
229 | u32 cpumask; | |
230 | unsigned int nr_irqs = intc->nr_irqs; | |
231 | void __iomem *base = intc->dist_base; | |
232 | ||
233 | writel_relaxed(0, base + GIC_DIST_CTRL); | |
234 | ||
235 | /* | |
236 | * Set all global interrupts to this CPU only. | |
237 | */ | |
238 | cpumask = hip04_get_cpumask(intc); | |
239 | cpumask |= cpumask << 16; | |
240 | for (i = 32; i < nr_irqs; i += 2) | |
241 | writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3)); | |
242 | ||
243 | gic_dist_config(base, nr_irqs, NULL); | |
244 | ||
245 | writel_relaxed(1, base + GIC_DIST_CTRL); | |
246 | } | |
247 | ||
248 | static void hip04_irq_cpu_init(struct hip04_irq_data *intc) | |
249 | { | |
250 | void __iomem *dist_base = intc->dist_base; | |
251 | void __iomem *base = intc->cpu_base; | |
252 | unsigned int cpu_mask, cpu = smp_processor_id(); | |
253 | int i; | |
254 | ||
255 | /* | |
256 | * Get what the GIC says our CPU mask is. | |
257 | */ | |
258 | BUG_ON(cpu >= NR_HIP04_CPU_IF); | |
259 | cpu_mask = hip04_get_cpumask(intc); | |
260 | hip04_cpu_map[cpu] = cpu_mask; | |
261 | ||
262 | /* | |
263 | * Clear our mask from the other map entries in case they're | |
264 | * still undefined. | |
265 | */ | |
266 | for (i = 0; i < NR_HIP04_CPU_IF; i++) | |
267 | if (i != cpu) | |
268 | hip04_cpu_map[i] &= ~cpu_mask; | |
269 | ||
270 | gic_cpu_config(dist_base, NULL); | |
271 | ||
272 | writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); | |
273 | writel_relaxed(1, base + GIC_CPU_CTRL); | |
274 | } | |
275 | ||
#ifdef CONFIG_SMP
/* Raise SGI @irq on every CPU in @mask via the distributor softint reg. */
static void hip04_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	unsigned long flags;
	unsigned long map = 0;
	int cpu;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= hip04_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
300 | ||
301 | static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq, | |
302 | irq_hw_number_t hw) | |
303 | { | |
304 | if (hw < 32) { | |
305 | irq_set_percpu_devid(irq); | |
306 | irq_set_chip_and_handler(irq, &hip04_irq_chip, | |
307 | handle_percpu_devid_irq); | |
308 | set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); | |
309 | } else { | |
310 | irq_set_chip_and_handler(irq, &hip04_irq_chip, | |
311 | handle_fasteoi_irq); | |
312 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | |
313 | } | |
314 | irq_set_chip_data(irq, d->host_data); | |
315 | return 0; | |
316 | } | |
317 | ||
318 | static int hip04_irq_domain_xlate(struct irq_domain *d, | |
319 | struct device_node *controller, | |
320 | const u32 *intspec, unsigned int intsize, | |
321 | unsigned long *out_hwirq, | |
322 | unsigned int *out_type) | |
323 | { | |
324 | unsigned long ret = 0; | |
325 | ||
326 | if (d->of_node != controller) | |
327 | return -EINVAL; | |
328 | if (intsize < 3) | |
329 | return -EINVAL; | |
330 | ||
331 | /* Get the interrupt number and add 16 to skip over SGIs */ | |
332 | *out_hwirq = intspec[1] + 16; | |
333 | ||
334 | /* For SPIs, we need to add 16 more to get the irq ID number */ | |
335 | if (!intspec[0]) | |
336 | *out_hwirq += 16; | |
337 | ||
338 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | |
339 | ||
340 | return ret; | |
341 | } | |
342 | ||
#ifdef CONFIG_SMP
/* Bring up the INTC CPU interface of a secondary CPU as it starts. */
static int hip04_irq_secondary_init(struct notifier_block *nfb,
				    unsigned long action,
				    void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		hip04_irq_cpu_init(&hip04_data);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the INTC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block hip04_irq_cpu_notifier = {
	.notifier_call	= hip04_irq_secondary_init,
	.priority	= 100,
};
#endif
362 | ||
363 | static const struct irq_domain_ops hip04_irq_domain_ops = { | |
364 | .map = hip04_irq_domain_map, | |
365 | .xlate = hip04_irq_domain_xlate, | |
366 | }; | |
367 | ||
368 | static int __init | |
369 | hip04_of_init(struct device_node *node, struct device_node *parent) | |
370 | { | |
371 | irq_hw_number_t hwirq_base = 16; | |
372 | int nr_irqs, irq_base, i; | |
373 | ||
374 | if (WARN_ON(!node)) | |
375 | return -ENODEV; | |
376 | ||
377 | hip04_data.dist_base = of_iomap(node, 0); | |
378 | WARN(!hip04_data.dist_base, "fail to map hip04 intc dist registers\n"); | |
379 | ||
380 | hip04_data.cpu_base = of_iomap(node, 1); | |
381 | WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n"); | |
382 | ||
383 | /* | |
384 | * Initialize the CPU interface map to all CPUs. | |
385 | * It will be refined as each CPU probes its ID. | |
386 | */ | |
387 | for (i = 0; i < NR_HIP04_CPU_IF; i++) | |
03d3d45b | 388 | hip04_cpu_map[i] = 0xffff; |
8e4bebe0 HZ |
389 | |
390 | /* | |
391 | * Find out how many interrupts are supported. | |
392 | * The HIP04 INTC only supports up to 510 interrupt sources. | |
393 | */ | |
394 | nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f; | |
395 | nr_irqs = (nr_irqs + 1) * 32; | |
396 | if (nr_irqs > HIP04_MAX_IRQS) | |
397 | nr_irqs = HIP04_MAX_IRQS; | |
398 | hip04_data.nr_irqs = nr_irqs; | |
399 | ||
400 | nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */ | |
401 | ||
402 | irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id()); | |
403 | if (IS_ERR_VALUE(irq_base)) { | |
404 | pr_err("failed to allocate IRQ numbers\n"); | |
405 | return -EINVAL; | |
406 | } | |
407 | ||
408 | hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base, | |
409 | hwirq_base, | |
410 | &hip04_irq_domain_ops, | |
411 | &hip04_data); | |
412 | ||
413 | if (WARN_ON(!hip04_data.domain)) | |
414 | return -EINVAL; | |
415 | ||
416 | #ifdef CONFIG_SMP | |
417 | set_smp_cross_call(hip04_raise_softirq); | |
418 | register_cpu_notifier(&hip04_irq_cpu_notifier); | |
419 | #endif | |
420 | set_handle_irq(hip04_handle_irq); | |
421 | ||
422 | hip04_irq_dist_init(&hip04_data); | |
423 | hip04_irq_cpu_init(&hip04_data); | |
424 | ||
425 | return 0; | |
426 | } | |
427 | IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init); |