/* drivers/irqchip/irq-mips-gic.c - MIPS Global Interrupt Controller (GIC) driver */
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8 */
9 #include <linux/bitmap.h>
10 #include <linux/clocksource.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/irqchip.h>
15 #include <linux/irqchip/mips-gic.h>
16 #include <linux/of_address.h>
17 #include <linux/sched.h>
18 #include <linux/smp.h>
19
20 #include <asm/mips-cm.h>
21 #include <asm/setup.h>
22 #include <asm/traps.h>
23
24 #include <dt-bindings/interrupt-controller/mips-gic.h>
25
/* Non-zero once a GIC has been successfully detected/initialised. */
unsigned int gic_present;

/* Per-CPU bitmap of the shared interrupts routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;			/* ioremapped GIC register space */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);		/* serialises GIC register updates */
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;			/* number of shared interrupts */
static int gic_vpes;				/* number of VPEs served by the GIC */
static unsigned int gic_cpu_pin;		/* CPU pin shared interrupts are routed to */
static unsigned int timer_cpu_pin;		/* CPU pin used by the local timer interrupt */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);
43
/* Read a 32-bit GIC register at byte offset @reg. */
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

/* Read a 64-bit GIC register at byte offset @reg. */
static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}
53
54 static inline unsigned long gic_read(unsigned int reg)
55 {
56 if (!mips_cm_is64)
57 return gic_read32(reg);
58 else
59 return gic_read64(reg);
60 }
61
/* Write @val to the 32-bit GIC register at byte offset @reg. */
static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

/* Write @val to the 64-bit GIC register at byte offset @reg. */
static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}
71
72 static inline void gic_write(unsigned int reg, unsigned long val)
73 {
74 if (!mips_cm_is64)
75 return gic_write32(reg, (u32)val);
76 else
77 return gic_write64(reg, (u64)val);
78 }
79
/*
 * Read-modify-write helper: clear the bits in @mask, then set the
 * bits in @val. Callers are expected to serialise via gic_lock.
 */
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	gic_write(reg, (gic_read(reg) & ~mask) | val);
}
90
/* Mask (disable) shared interrupt @intr via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

/* Unmask (enable) shared interrupt @intr via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

/* Set the polarity (active high/low) of shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

/* Set the trigger mode (edge/level) of shared interrupt @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

/* Enable/disable dual-edge triggering for shared interrupt @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

/* Route shared interrupt @intr to CPU interrupt pin @pin. */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

/* Route shared interrupt @intr to VPE @vpe. */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
136
#ifdef CONFIG_CLKSRC_MIPS_GIC
/*
 * Read the 64-bit GIC shared counter.
 *
 * On a 64-bit CM a single atomic read suffices. Otherwise the counter
 * must be read as two 32-bit halves; the high half is re-read after
 * the low half and the sequence retried if it changed, so a carry
 * between the two reads cannot produce a torn value.
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}
153
154 unsigned int gic_get_count_width(void)
155 {
156 unsigned int bits, config;
157
158 config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
159 bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
160 GIC_SH_CONFIG_COUNTBITS_SHF);
161
162 return bits;
163 }
164
/*
 * Program this CPU's GIC timer compare register with @cnt. On a
 * 32-bit CM the 64-bit value is written as two halves, high word
 * first.
 */
void gic_write_compare(cycle_t cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}
}
176
/*
 * Program the GIC timer compare register of another CPU.
 *
 * The target VPE's local registers are reached through the VPE_OTHER
 * alias: writing the VPE number to GIC_VPE_OTHER_ADDR redirects
 * subsequent VPE_OTHER accesses to that VPE. Local interrupts are
 * disabled so the redirect window is not clobbered on this CPU
 * between the address write and the compare writes.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Select @cpu's local register block via the OTHER alias */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}
196
/*
 * Read back this CPU's GIC timer compare register. On a 32-bit CM the
 * two halves are read separately and recombined; the compare value is
 * software-written and stable, so no retry loop is needed here.
 */
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
209
210 void gic_start_count(void)
211 {
212 u32 gicconfig;
213
214 /* Start the counter */
215 gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
216 gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
217 gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
218 }
219
220 void gic_stop_count(void)
221 {
222 u32 gicconfig;
223
224 /* Stop the counter */
225 gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
226 gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
227 gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
228 }
229
230 #endif
231
232 static bool gic_local_irq_is_routable(int intr)
233 {
234 u32 vpe_ctl;
235
236 /* All local interrupts are routable in EIC mode. */
237 if (cpu_has_veic)
238 return true;
239
240 vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
241 switch (intr) {
242 case GIC_LOCAL_INT_TIMER:
243 return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
244 case GIC_LOCAL_INT_PERFCTR:
245 return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
246 case GIC_LOCAL_INT_FDC:
247 return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
248 case GIC_LOCAL_INT_SWINT0:
249 case GIC_LOCAL_INT_SWINT1:
250 return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
251 default:
252 return true;
253 }
254 }
255
/*
 * In EIC mode, bind interrupt vector @irq to register shadow set @set
 * for this VPE. Installed as board_bind_eic_interrupt.
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

/*
 * Raise shared interrupt @intr by setting its edge-detect bit in the
 * WEDGE register; used to deliver inter-processor interrupts.
 */
void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}
270
271 int gic_get_c0_compare_int(void)
272 {
273 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
274 return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
275 return irq_create_mapping(gic_irq_domain,
276 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
277 }
278
279 int gic_get_c0_perfcount_int(void)
280 {
281 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
282 /* Is the performance counter shared with the timer? */
283 if (cp0_perfcount_irq < 0)
284 return -1;
285 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
286 }
287 return irq_create_mapping(gic_irq_domain,
288 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
289 }
290
291 int gic_get_c0_fdc_int(void)
292 {
293 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
294 /* Is the FDC IRQ even present? */
295 if (cp0_fdc_irq < 0)
296 return -1;
297 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
298 }
299
300 return irq_create_mapping(gic_irq_domain,
301 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
302 }
303
/*
 * Decode and dispatch every pending shared interrupt routed to the
 * current CPU.
 *
 * The shared pending and mask register banks are read word by word
 * (register stride is 8 bytes on a 64-bit CM, 4 bytes otherwise),
 * then ANDed with this CPU's routing bitmap so only interrupts
 * targeted at this CPU are handled.
 *
 * @chained: true when invoked from a chained flow handler (dispatch
 *           with generic_handle_irq()), false when invoked from the
 *           low-level vector (dispatch with do_IRQ()).
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	/* Snapshot the pending and mask banks, one word per iteration */
	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	/* Keep only unmasked interrupts routed to this CPU */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}
342
/* irq_chip .irq_mask: disable a shared interrupt. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

/* irq_chip .irq_unmask: enable a shared interrupt. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

/* irq_chip .irq_ack: clear a shared edge interrupt via WEDGE. */
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}
359
/*
 * irq_chip .irq_set_type: program polarity, edge/level trigger and
 * dual-edge mode for a shared interrupt, then switch the descriptor
 * to the matching irq_chip and flow handler (the edge chip provides
 * an ack, the level chip does not). Register updates are serialised
 * by gic_lock.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		/* Unspecified types default to active-high level */
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
411
#ifdef CONFIG_SMP
/*
 * irq_chip .irq_set_affinity: re-route a shared interrupt to a CPU.
 *
 * The GIC routes each shared interrupt to a single VPE, so only the
 * first online CPU of the requested mask is used. The per-CPU routing
 * bitmaps are updated to match, and IRQ_SET_MASK_OK_NOCOPY is
 * returned because the affinity mask is copied back manually here.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, cpumask_first(&tmp));

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
442
/* irq_chip for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

/* irq_chip for edge-triggered shared interrupts (acked via WEDGE). */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
463
/*
 * Decode and dispatch all pending, unmasked local (per-VPE)
 * interrupts of the current CPU.
 *
 * @chained: true when invoked from a chained flow handler (dispatch
 *           with generic_handle_irq()), false when invoked from the
 *           low-level vector (dispatch with do_IRQ()).
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	/* Keep only interrupts that are both pending and unmasked */
	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}
488
/* irq_chip .irq_mask: disable a local interrupt on this VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

/* irq_chip .irq_unmask: enable a local interrupt on this VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

/* irq_chip for local interrupts masked per-VPE (percpu-devid flow). */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
508
/*
 * irq_chip .irq_mask: disable a local interrupt on every VPE, using
 * the VPE_OTHER redirect to reach each one in turn under gic_lock.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/*
 * irq_chip .irq_unmask: enable a local interrupt on every VPE,
 * mirroring gic_mask_local_irq_all_vpes().
 */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* irq_chip for local interrupts masked on all VPEs at once. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
542
/* Low-level entry point (EIC vector handler): local ints first. */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

/* Chained handler installed on the parent CPU interrupt line. */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
554
#ifdef CONFIG_MIPS_GIC_IPI
/* First shared interrupt numbers of the per-CPU IPI ranges. */
static int gic_resched_int_base;
static int gic_call_int_base;

/* Map a CPU number to its reschedule-IPI shared interrupt. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

/* Map a CPU number to its function-call-IPI shared interrupt. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

/* Handler for the reschedule IPI. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

/* Handler for the SMP function-call IPI. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};
594
/*
 * Set up one IPI: map shared interrupt @intr into the IRQ domain,
 * route it exclusively to @cpu, and install @action as a per-CPU
 * edge-triggered handler.
 */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	/* Route @intr to @cpu only */
	gic_map_to_vpe(intr, cpu);
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}
612
/*
 * Reserve the highest 2 * nr_cpu_ids shared interrupts as IPIs:
 * one reschedule and one call interrupt per possible CPU.
 */
static __init void gic_ipi_init(void)
{
	int i;

	/* Use the last 2 * nr_cpu_ids shared interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
/* No GIC IPIs configured: nothing to set up. */
static inline void gic_ipi_init(void)
{
}
#endif
631
/*
 * Put the GIC into a sane default state: all shared interrupts
 * active-high, level-triggered and masked; all routable local
 * interrupts masked on every VPE (reached via the VPE_OTHER alias).
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		/* Select VPE i's local registers via the OTHER alias */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
656
/*
 * Map a local (per-VPE) hwirq into the IRQ domain.
 *
 * Selects the appropriate irq_chip/flow handler, then routes the
 * interrupt to its CPU pin on every VPE via the VPE_OTHER alias.
 * Returns -EPERM when the interrupt is not routable through the GIC,
 * -EINVAL for an unknown local interrupt number.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		/* Select VPE i's local registers via the OTHER alias */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
734
/*
 * Map a shared hwirq into the IRQ domain: install the level chip and
 * handler (gic_set_type() may switch to edge later), route the
 * interrupt to the GIC CPU pin, and target VPE 0 by default.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
753
754 static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
755 irq_hw_number_t hw)
756 {
757 if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
758 return gic_local_irq_domain_map(d, virq, hw);
759 return gic_shared_irq_domain_map(d, virq, hw);
760 }
761
762 static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
763 const u32 *intspec, unsigned int intsize,
764 irq_hw_number_t *out_hwirq,
765 unsigned int *out_type)
766 {
767 if (intsize != 3)
768 return -EINVAL;
769
770 if (intspec[0] == GIC_SHARED)
771 *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
772 else if (intspec[0] == GIC_LOCAL)
773 *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
774 else
775 return -EINVAL;
776 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
777
778 return 0;
779 }
780
/* IRQ domain callbacks: hwirq mapping and DT specifier translation. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
785
/*
 * Common GIC initialisation for both the legacy and DT probe paths.
 *
 * Maps the register space, reads the shared-interrupt and VPE counts
 * from the shared CONFIG register, hooks the dispatch handler into
 * either the EIC vector or a chained CPU interrupt line, creates the
 * IRQ domain (local hwirqs first, then shared), and programs the
 * hardware defaults.
 *
 * @gic_base_addr:	physical base of the GIC register space
 * @gic_addrspace_size:	size of that register space
 * @cpu_vec:		CPU interrupt vector the GIC feeds (non-EIC mode)
 * @irqbase:		first virq for irq_domain_add_simple()
 * @node:		DT node, or NULL on the legacy path
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	/* CONFIG encodes (number of shared interrupts / 8) - 1 */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	/* CONFIG encodes the number of VPEs - 1 */
	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							   GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}
849
/* Legacy (non-DT) entry point: initialise the GIC with no DT node. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
856
/*
 * Device-tree probe: pick the first CPU vector (2-7) not listed in
 * "mti,reserved-cpu-vectors", locate the register space from the DT
 * "reg" property (falling back to the Coherence Manager's GCR GIC
 * base), enable the GIC in the CM, and run the common init.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	/* Enable the GIC region in the Coherence Manager */
	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);