/* drivers/irqchip/irq-mips-gic.c — MIPS Global Interrupt Controller driver */
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8 */
9 #include <linux/bitmap.h>
10 #include <linux/clocksource.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/irqchip.h>
15 #include <linux/irqchip/mips-gic.h>
16 #include <linux/of_address.h>
17 #include <linux/sched.h>
18 #include <linux/smp.h>
19
20 #include <asm/mips-cm.h>
21 #include <asm/setup.h>
22 #include <asm/traps.h>
23
24 #include <dt-bindings/interrupt-controller/mips-gic.h>
25
/* Non-zero once a GIC has been probed; read by MIPS arch code. */
unsigned int gic_present;

/* Per-CPU bitmap of the shared interrupts currently routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;		/* mapped GIC register region */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);	/* serialises GIC register updates */
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;		/* number of shared interrupts */
static int gic_vpes;			/* number of VP(E)s seen by the GIC */
static unsigned int gic_cpu_pin;	/* CPU pin shared ints are routed to */
static unsigned int timer_cpu_pin;	/* CPU pin for the local timer int */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);
44 static inline u32 gic_read32(unsigned int reg)
45 {
46 return __raw_readl(gic_base + reg);
47 }
48
49 static inline u64 gic_read64(unsigned int reg)
50 {
51 return __raw_readq(gic_base + reg);
52 }
53
54 static inline unsigned long gic_read(unsigned int reg)
55 {
56 if (!mips_cm_is64)
57 return gic_read32(reg);
58 else
59 return gic_read64(reg);
60 }
61
62 static inline void gic_write32(unsigned int reg, u32 val)
63 {
64 return __raw_writel(val, gic_base + reg);
65 }
66
67 static inline void gic_write64(unsigned int reg, u64 val)
68 {
69 return __raw_writeq(val, gic_base + reg);
70 }
71
72 static inline void gic_write(unsigned int reg, unsigned long val)
73 {
74 if (!mips_cm_is64)
75 return gic_write32(reg, (u32)val);
76 else
77 return gic_write64(reg, (u64)val);
78 }
79
80 static inline void gic_update_bits(unsigned int reg, unsigned long mask,
81 unsigned long val)
82 {
83 unsigned long regval;
84
85 regval = gic_read(reg);
86 regval &= ~mask;
87 regval |= val;
88 gic_write(reg, regval);
89 }
90
91 static inline void gic_reset_mask(unsigned int intr)
92 {
93 gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
94 1ul << GIC_INTR_BIT(intr));
95 }
96
97 static inline void gic_set_mask(unsigned int intr)
98 {
99 gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
100 1ul << GIC_INTR_BIT(intr));
101 }
102
103 static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
104 {
105 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
106 GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
107 (unsigned long)pol << GIC_INTR_BIT(intr));
108 }
109
110 static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
111 {
112 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
113 GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
114 (unsigned long)trig << GIC_INTR_BIT(intr));
115 }
116
117 static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
118 {
119 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
120 1ul << GIC_INTR_BIT(intr),
121 (unsigned long)dual << GIC_INTR_BIT(intr));
122 }
123
124 static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
125 {
126 gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
127 GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
128 }
129
130 static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
131 {
132 gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
133 GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
134 GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
135 }
136
#ifdef CONFIG_CLKSRC_MIPS_GIC
/*
 * Read the 64-bit free-running GIC counter.  A 64-bit CM can read it
 * atomically; on 32-bit it is read as two halves, retrying until the
 * high word is stable across the low-word read.
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);	/* retry if a carry crossed into hi */

	return (((cycle_t) hi) << 32) + lo;
}

/* Counter width in bits, decoded from GIC_SH_CONFIG as 32 + 4*N. */
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

/* Program this VPE's compare register (split into HI/LO on 32-bit CM). */
void gic_write_compare(cycle_t cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}
}

/*
 * Program @cpu's compare register via the VPE-other alias region.
 * Interrupts are disabled so the OTHER_ADDR selection and the compare
 * writes cannot be interleaved with another local user of the alias.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}

/* Read back this VPE's compare register (no tearing risk: it is static). */
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}

/* Clear the COUNTSTOP bit so the shared counter runs. */
void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

/* Set the COUNTSTOP bit to freeze the shared counter. */
void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif
231
232 static bool gic_local_irq_is_routable(int intr)
233 {
234 u32 vpe_ctl;
235
236 /* All local interrupts are routable in EIC mode. */
237 if (cpu_has_veic)
238 return true;
239
240 vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
241 switch (intr) {
242 case GIC_LOCAL_INT_TIMER:
243 return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
244 case GIC_LOCAL_INT_PERFCTR:
245 return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
246 case GIC_LOCAL_INT_FDC:
247 return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
248 case GIC_LOCAL_INT_SWINT0:
249 case GIC_LOCAL_INT_SWINT1:
250 return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
251 default:
252 return true;
253 }
254 }
255
256 static void gic_bind_eic_interrupt(int irq, int set)
257 {
258 /* Convert irq vector # to hw int # */
259 irq -= GIC_PIN_TO_VEC_OFFSET;
260
261 /* Set irq to use shadow set */
262 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
263 GIC_VPE_EIC_SS(irq), set);
264 }
265
266 void gic_send_ipi(unsigned int intr)
267 {
268 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
269 }
270
271 int gic_get_c0_compare_int(void)
272 {
273 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
274 return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
275 return irq_create_mapping(gic_irq_domain,
276 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
277 }
278
279 int gic_get_c0_perfcount_int(void)
280 {
281 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
282 /* Is the performance counter shared with the timer? */
283 if (cp0_perfcount_irq < 0)
284 return -1;
285 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
286 }
287 return irq_create_mapping(gic_irq_domain,
288 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
289 }
290
291 int gic_get_c0_fdc_int(void)
292 {
293 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
294 /* Is the FDC IRQ even present? */
295 if (cp0_fdc_irq < 0)
296 return -1;
297 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
298 }
299
300 return irq_create_mapping(gic_irq_domain,
301 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
302 }
303
/*
 * Service all pending shared interrupts routed to this CPU.
 *
 * Reads the pending and mask register banks into local bitmaps (on a
 * 32-bit kernel with a 32-bit CM each long takes two reads), filters by
 * this CPU's routing mask, then dispatches each set bit.  @chained
 * selects generic_handle_irq() (cascaded entry) vs do_IRQ() (vectored).
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		/* 64-bit longs filled by one 64-bit read need no second read */
		if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
			continue;

		/* 64-bit kernel, 32-bit CM: merge the upper 32 bits */
		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	/* Only consider unmasked interrupts routed to this CPU */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}
350
351 static void gic_mask_irq(struct irq_data *d)
352 {
353 gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
354 }
355
356 static void gic_unmask_irq(struct irq_data *d)
357 {
358 gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
359 }
360
361 static void gic_ack_irq(struct irq_data *d)
362 {
363 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
364
365 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
366 }
367
/*
 * irq_chip.irq_set_type: program polarity/trigger/dual-edge for a shared
 * interrupt and swap in the matching chip + flow handler (edge vs level).
 * Unrecognised types fall back to active-high level.  Always returns 0.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	/* Edge interrupts need the ack-ing chip; level ones do not. */
	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
419
#ifdef CONFIG_SMP
/*
 * irq_chip.irq_set_affinity: re-route a shared interrupt to the first
 * online CPU in @cpumask.  The hardware routes each shared interrupt to
 * exactly one VPE, so only the first CPU of the mask takes effect.
 * Returns -EINVAL if the mask contains no online CPU.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	/* Record the full requested mask; NOCOPY suppresses the core copy. */
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
450
/* Chip for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

/* Chip for edge-triggered shared interrupts; ack clears the edge latch. */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
471
/*
 * Service all pending, unmasked local (per-VPE) interrupts.  @chained
 * selects generic_handle_irq() (cascaded entry) vs do_IRQ() (vectored).
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}

/* Mask a local interrupt on the current VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

/* Unmask a local interrupt on the current VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

/* Chip for local interrupts masked per-VPE (percpu_devid flow). */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
516
/*
 * Mask a local interrupt on every VPE, addressing each in turn through
 * the VPE-other alias.  gic_lock serialises use of OTHER_ADDR.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* Unmask a local interrupt on every VPE (counterpart of the above). */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* Chip for local interrupts that are masked/unmasked on all VPEs at once. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
550
/* EIC vector entry point: drain local then shared pending interrupts. */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

/* Chained handler used when the GIC cascades off a CPU interrupt line. */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
562
#ifdef CONFIG_MIPS_GIC_IPI
/* First shared interrupt of the per-CPU resched / call IPI ranges. */
static int gic_resched_int_base;
static int gic_call_int_base;

/* Shared-interrupt number of @cpu's reschedule IPI. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

/* Shared-interrupt number of @cpu's function-call IPI. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

/* Handler for the reschedule IPI. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

/* Handler for the smp_call_function IPI. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

/*
 * Set up one IPI: map shared interrupt @intr to @cpu (and only @cpu in
 * the pcpu masks), make it rising-edge, and install @action on it.
 */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

/* Reserve and wire up one resched + one call IPI per possible CPU. */
static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * nr_cpu_ids shared interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
/* No GIC IPIs configured: nothing to set up. */
static inline void gic_ipi_init(void)
{
}
#endif
639
/*
 * Put the GIC into a known state: all shared interrupts active-high,
 * level-triggered and masked; all routable local interrupts masked on
 * every VPE.  Also registers the EIC shadow-set binding hook.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		/* Address VPE i through the VPE-other alias */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
664
/*
 * Map a local (per-VPE) hwirq into the domain: pick the right chip/flow
 * handler and program the interrupt's routing pin on every VPE.
 * Returns -EPERM for non-routable interrupts, -EINVAL for unknown ones.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	/* Route the interrupt to gic_cpu_pin on each VPE in turn */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
742
/*
 * Map a shared hwirq: default to the level chip/handler, route it to
 * gic_cpu_pin on VPE 0, and mark it in VPE 0's pcpu mask.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

/* Domain .map: dispatch to the local or shared mapper by hwirq range. */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw);
}

/*
 * Domain .xlate for 3-cell DT specifiers:
 * <GIC_SHARED|GIC_LOCAL, interrupt-number, trigger-type>.
 */
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
793
794 static void __init __gic_init(unsigned long gic_base_addr,
795 unsigned long gic_addrspace_size,
796 unsigned int cpu_vec, unsigned int irqbase,
797 struct device_node *node)
798 {
799 unsigned int gicconfig;
800
801 gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);
802
803 gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
804 gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
805 GIC_SH_CONFIG_NUMINTRS_SHF;
806 gic_shared_intrs = ((gic_shared_intrs + 1) * 8);
807
808 gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
809 GIC_SH_CONFIG_NUMVPES_SHF;
810 gic_vpes = gic_vpes + 1;
811
812 if (cpu_has_veic) {
813 /* Always use vector 1 in EIC mode */
814 gic_cpu_pin = 0;
815 timer_cpu_pin = gic_cpu_pin;
816 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
817 __gic_irq_dispatch);
818 } else {
819 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
820 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
821 gic_irq_dispatch);
822 /*
823 * With the CMP implementation of SMP (deprecated), other CPUs
824 * are started by the bootloader and put into a timer based
825 * waiting poll loop. We must not re-route those CPU's local
826 * timer interrupts as the wait instruction will never finish,
827 * so just handle whatever CPU interrupt it is routed to by
828 * default.
829 *
830 * This workaround should be removed when CMP support is
831 * dropped.
832 */
833 if (IS_ENABLED(CONFIG_MIPS_CMP) &&
834 gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
835 timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
836 GIC_VPE_TIMER_MAP)) &
837 GIC_MAP_MSK;
838 irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
839 GIC_CPU_PIN_OFFSET +
840 timer_cpu_pin,
841 gic_irq_dispatch);
842 } else {
843 timer_cpu_pin = gic_cpu_pin;
844 }
845 }
846
847 gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
848 gic_shared_intrs, irqbase,
849 &gic_irq_domain_ops, NULL);
850 if (!gic_irq_domain)
851 panic("Failed to add GIC IRQ domain");
852
853 gic_basic_init();
854
855 gic_ipi_init();
856 }
857
/* Legacy (non-DT) entry point: initialise the GIC with no OF node. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
864
/*
 * Device-tree probe: pick a free CPU vector (2..7, honouring the
 * "mti,reserved-cpu-vectors" property), locate the register region from
 * "reg" or the CM GCRs, enable the GIC in the CM, and initialise it.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;	/* default GIC region size */
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	/* Enable the GIC at its base address via the CM GCRs */
	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
/* end of irq-mips-gic.c */