[SPARC64]: Only use bypass accesses to INO buckets.
arch/sparc64/kernel/irq.c
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
struct ino_bucket {
/*0x00*/ unsigned long __irq_chain_pa;

	 /* Virtual interrupt number assigned to this INO.  */
/*0x08*/ unsigned int __virt_irq;
/*0x0c*/ unsigned int __pad;
};

#define NUM_IVECS	(IMAP_INR + 1)
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}
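
/* Note that the accessors above all take the *physical* address of a
 * bucket (e.g. __pa(bucket)) and use ASI_PHYS_USE_EC loads/stores, so
 * the cacheable kernel mapping of a bucket is never mixed with these
 * bypass accesses.
 */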

#define __irq_ino(irq) \
	(((struct ino_bucket *)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(irq))
#define __irq(bucket) ((unsigned long)(bucket))

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

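/* Map from virtual IRQ number back to the underlying "real" IRQ (the
 * bucket address) and, for sun4v cookie interrupts, to the device
 * handle/ino pair needed by the vintr hypervisor calls.
 */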
static struct {
	unsigned long irq;
	unsigned int dev_handle;
	unsigned int dev_ino;
} virt_to_real_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

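/* Allocate a virtual IRQ number for the given real IRQ.  Virtual IRQ 0
 * is never handed out, so a zero return value means the table is full.
 */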
unsigned char virt_irq_alloc(unsigned long real_irq)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_to_real_irq_table[ent].irq)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_to_real_irq_table[ent].irq = real_irq;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_to_real_irq_table[virt_irq].irq = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

static unsigned long virt_to_real_irq(unsigned char virt_irq)
{
	return virt_to_real_irq_table[virt_irq].irq;
}

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}

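/* Compute the interrupt target ID (TID) field for an IMAP register.
 * The encoding depends on the bus type: Starfire needs a firmware
 * translation of the UPA cpuid, JBUS (Jalapeno/Serrano) takes the
 * cpuid directly, and Safari (Cheetah) splits the cpuid into agent
 * and node fields.
 */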
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

struct irq_handler_data {
	unsigned long iclr;
	unsigned long imap;

	void (*pre_handler)(unsigned int, void *, void *);
	void *pre_handler_arg1;
	void *pre_handler_arg2;
};

static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
	unsigned long real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket = NULL;

	if (likely(real_irq))
		bucket = __bucket(real_irq);

	return bucket;
}

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif

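/* sun4u: an interrupt is enabled by writing a valid target ID into its
 * interrupt map register; retargeting (set_affinity) is therefore just
 * a re-enable with a freshly chosen cpu.
 */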
static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
	}
}

static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	sun4u_irq_enable(virt_irq);
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long imap = data->imap;
		unsigned long tmp = upa_readq(imap);

		tmp &= ~IMAP_VALID;
		upa_writeq(tmp, imap);
	}
}

static void sun4u_irq_end(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long cpuid;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
			       "err(%d)\n", ino, cpuid, err);
		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
			       ino, err);
	}
}

static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long cpuid;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
			       "err(%d)\n", ino, cpuid, err);
	}
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		int err;

		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_intr_setenabled(%x): "
			       "err(%d)\n", ino, err);
	}
}

static void sun4v_irq_end(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(bucket)) {
		int err;

		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
	}
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

	if (likely(bucket)) {
		unsigned long cpuid, dev_handle, dev_ino;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
		dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

		err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
			       "err(%d)\n",
			       dev_handle, dev_ino, cpuid, err);
		err = sun4v_vintr_set_state(dev_handle, dev_ino,
					    HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
			       "HV_INTR_STATE_IDLE): err(%d)\n",
			       dev_handle, dev_ino, err);
		err = sun4v_vintr_set_valid(dev_handle, dev_ino,
					    HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
			       "HV_INTR_ENABLED): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

	if (likely(bucket)) {
		unsigned long cpuid, dev_handle, dev_ino;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
		dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

		err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
			       "err(%d)\n",
			       dev_handle, dev_ino, cpuid, err);
	}
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

	if (likely(bucket)) {
		unsigned long dev_handle, dev_ino;
		int err;

		dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
		dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

		err = sun4v_vintr_set_valid(dev_handle, dev_ino,
					    HV_INTR_DISABLED);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
			       "HV_INTR_DISABLED): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

static void sun4v_virq_end(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(bucket)) {
		unsigned long dev_handle, dev_ino;
		int err;

		dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
		dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

		err = sun4v_vintr_set_state(dev_handle, dev_ino,
					    HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
			       "HV_INTR_STATE_IDLE): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

static void run_pre_handler(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data->pre_handler)) {
		data->pre_handler(__irq_ino(__irq(bucket)),
				  data->pre_handler_arg1,
				  data->pre_handler_arg2);
	}
}

static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.end		= sun4u_irq_end,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4u_irq_ack = {
	.typename	= "sun4u+ack",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.ack		= run_pre_handler,
	.end		= sun4u_irq_end,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.end		= sun4v_irq_end,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.end		= sun4v_virq_end,
	.set_affinity	= sun4v_virt_set_affinity,
};

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_chip *chip = get_irq_chip(virt_irq);

	if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
		printk(KERN_ERR "IRQ: Trying to install pre-handler on "
		       "sun4v irq %u\n", virt_irq);
		return;
	}

	data->pre_handler = func;
	data->pre_handler_arg1 = arg1;
	data->pre_handler_arg2 = arg2;

	if (chip == &sun4u_irq_ack)
		return;

	set_irq_chip(virt_irq, &sun4u_irq_ack);
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(__irq(bucket));
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip(virt_irq, &sun4u_irq);
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap = imap;
	data->iclr = iclr;

out:
	return virt_irq;
}

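/* Typical use by a sun4u bus driver, with the ICLR/IMAP register
 * addresses taken from the firmware device node (illustrative sketch
 * only; "my_handler" and "my_dev" are hypothetical):
 *
 *	unsigned int virt_irq = build_irq(inofixup, iclr, imap);
 *
 *	if (request_irq(virt_irq, my_handler, 0, "mydev", my_dev))
 *		return -EBUSY;
 */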
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(__irq(bucket));
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip(virt_irq, chip);
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

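/* Cookie-based virtual interrupts: the INO bucket is allocated
 * dynamically, and the complement of its physical address is
 * registered with the hypervisor as the cookie it will hand back
 * at delivery time.
 */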
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	struct ino_bucket *bucket;
	unsigned long hv_err, cookie;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;
	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(__irq(bucket));
	bucket_set_virt_irq(__pa(bucket), virt_irq);
	set_irq_chip(virt_irq, &sun4v_virq);

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	virt_to_real_irq_table[virt_irq].dev_handle = devhandle;
	virt_to_real_irq_table[virt_irq].dev_ino = devino;

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = 0xdeadbeef;

	if (bucket)
		ino = bucket - &ivector_table[0];

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

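/* Entry point from the vectored-interrupt trap handler.  Atomically
 * snapshot and clear this cpu's worklist head (a physical address),
 * then walk the bucket chain using the bypass accessors, dispatching
 * the virtual IRQ recorded in each bucket.
 */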
void handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %3, %%pstate\n\t"
			     "ldx [%2], %1\n\t"
			     "stx %%g0, [%2]\n\t"
			     "wrpr %0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		__do_IRQ(virt_irq);

		bucket_pa = next_pa;
	}

	irq_exit();
	set_irq_regs(old_regs);
}

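/* CPU hot-unplug support: retarget every active, non-per-cpu IRQ
 * according to its affinity mask so that nothing remains pointed at
 * a departing cpu.
 */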
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}
}
#endif

struct sun5_timer {
	u64 count0;
	u64 limit0;
	u64 count1;
	u64 limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must have mapped it.  */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later.  */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem_low(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem_low(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = alloc_bootmem_low_pages(PAGE_SIZE);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
			       tb->nonresum_qmask);

		init_cpu_send_mondo_info(tb);
	}

	/* Load up the boot cpu's entries.  */
	sun4v_register_mondo_queues(hard_smp_processor_id());
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor.  */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = alloc_bootmem_low(size);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}