/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/hypervisor.h>

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
struct ino_bucket {
/*0x00*/unsigned long irq_chain_pa;

        /* Virtual interrupt number assigned to this INO.  */
/*0x08*/unsigned int virt_irq;
/*0x0c*/unsigned int __pad;
};

#define NUM_IVECS	(IMAP_INR + 1)
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

#define __irq_ino(irq) \
        (((struct ino_bucket *)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(irq))
#define __irq(bucket) ((unsigned long)(bucket))

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
        unsigned long irq;
        unsigned int dev_handle;
        unsigned int dev_ino;
} virt_to_real_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

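/* Allocate a virtual IRQ number for a real interrupt source.  Entry 0 of
 * the table is reserved as the "no irq" sentinel, and virt_irq numbers
 * must fit in a single byte (hence the BUILD_BUG_ON below), since the
 * allocator hands them out as unsigned char.
 */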
unsigned char virt_irq_alloc(unsigned long real_irq)
{
        unsigned long flags;
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        for (ent = 1; ent < NR_IRQS; ent++) {
                if (!virt_to_real_irq_table[ent].irq)
                        break;
        }
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                ent = 0;
        } else {
                virt_to_real_irq_table[ent].irq = real_irq;
        }

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

        return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
        unsigned long flags;

        if (virt_irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        virt_to_real_irq_table[virt_irq].irq = 0;

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

static unsigned long virt_to_real_irq(unsigned char virt_irq)
{
        return virt_to_real_irq_table[virt_irq].irq;
}

/*
 * /proc/interrupts printing:
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, "  %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
        return 0;
}

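/* Compute the interrupt target ID (TID) to program into an IMAP register.
 * The encoding differs by system type: Starfire boards translate the cpuid
 * through firmware, JBUS parts (Jalapeno/Serrano) use a flat TID field,
 * Safari parts (Cheetah) split the target into agent and node IDs, and
 * everything else uses the plain UPA TID field.
 */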
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}

struct irq_handler_data {
        unsigned long   iclr;
        unsigned long   imap;

        void            (*pre_handler)(unsigned int, void *, void *);
        void            *pre_handler_arg1;
        void            *pre_handler_arg2;
};

static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
        unsigned long real_irq = virt_to_real_irq(virt_irq);
        struct ino_bucket *bucket = NULL;

        if (likely(real_irq))
                bucket = __bucket(real_irq);

        return bucket;
}

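/* Pick a target cpu for an interrupt.  If the affinity mask names a
 * specific set of cpus we honor it (restricted to those currently online);
 * otherwise we rotate round-robin across all online cpus.
 */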
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
        cpumask_t mask = irq_desc[virt_irq].affinity;
        int cpuid;

        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
        do_round_robin:
                spin_lock_irqsave(&irq_rover_lock, flags);

                while (!cpu_online(irq_rover)) {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));

                spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);

                if (cpus_empty(tmp))
                        goto do_round_robin;

                cpuid = first_cpu(tmp);
        }

        return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
        return real_hard_smp_processor_id();
}
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
        }
}

static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        sun4u_irq_enable(virt_irq);
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long imap = data->imap;
                unsigned long tmp = upa_readq(imap);

                tmp &= ~IMAP_VALID;
                upa_writeq(tmp, imap);
        }
}

static void sun4u_irq_end(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(data))
                upa_writeq(ICLR_IDLE, data->iclr);
}

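/* On sun4v, interrupt routing and state live behind hypervisor calls
 * keyed by sysino, rather than behind IMAP/ICLR register writes.
 */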
static void sun4v_irq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                               "err(%d)\n", ino, cpuid, err);
                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
                err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
                               ino, err);
        }
}

static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                               "err(%d)\n", ino, cpuid, err);
        }
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setenabled(%x): "
                               "err(%d)\n", ino, err);
        }
}

static void sun4v_irq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
        }
}

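/* The sun4v_virq_* variants below manage cookie-based virtual interrupts,
 * which are addressed by (dev_handle, dev_ino) pairs instead of a sysino.
 */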
static void sun4v_virq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long cpuid, dev_handle, dev_ino;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                               "err(%d)\n",
                               dev_handle, dev_ino, cpuid, err);
                err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                            HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_STATE_IDLE): err(%d)\n",
                               dev_handle, dev_ino, err);
                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                            HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_ENABLED): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long cpuid, dev_handle, dev_ino;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                               "err(%d)\n",
                               dev_handle, dev_ino, cpuid, err);
        }
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long dev_handle, dev_ino;
                int err;

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                            HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_DISABLED): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void sun4v_virq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(bucket)) {
                unsigned long dev_handle, dev_ino;
                int err;

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                            HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_STATE_IDLE): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void run_pre_handler(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data->pre_handler)) {
                data->pre_handler(__irq_ino(__irq(bucket)),
                                  data->pre_handler_arg1,
                                  data->pre_handler_arg2);
        }
}

static struct irq_chip sun4u_irq = {
        .typename       = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .end            = sun4u_irq_end,
        .set_affinity   = sun4u_set_affinity,
};

static struct irq_chip sun4u_irq_ack = {
        .typename       = "sun4u+ack",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .ack            = run_pre_handler,
        .end            = sun4u_irq_end,
        .set_affinity   = sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
        .typename       = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .end            = sun4v_irq_end,
        .set_affinity   = sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
        .typename       = "vsun4v",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .end            = sun4v_virq_end,
        .set_affinity   = sun4v_virt_set_affinity,
};

void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_chip *chip = get_irq_chip(virt_irq);

        if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
                printk(KERN_ERR "IRQ: Trying to install pre-handler on "
                       "sun4v irq %u\n", virt_irq);
                return;
        }

        data->pre_handler = func;
        data->pre_handler_arg1 = arg1;
        data->pre_handler_arg2 = arg2;

        if (chip == &sun4u_irq_ack)
                return;

        set_irq_chip(virt_irq, &sun4u_irq_ack);
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                set_irq_chip(bucket->virt_irq, &sun4u_irq);
        }

        data = get_irq_chip_data(bucket->virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(bucket->virt_irq, data);

        data->imap = imap;
        data->iclr = iclr;

out:
        return bucket->virt_irq;
}

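/* Typical (hypothetical) use by a sun4u bus driver: compute the ICLR and
 * IMAP register addresses for a device from its firmware properties, then
 *
 *      virt_irq = build_irq(0, iclr, imap);
 *      err = request_irq(virt_irq, my_handler, 0, "mydev", dev);
 *
 * where "my_handler", "mydev" and "dev" stand in for the driver's own names.
 */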
static unsigned int sun4v_build_common(unsigned long sysino,
                                       struct irq_chip *chip)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;

        BUG_ON(tlb_type != hypervisor);

        bucket = &ivector_table[sysino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                set_irq_chip(bucket->virt_irq, chip);
        }

        data = get_irq_chip_data(bucket->virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(bucket->virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return bucket->virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

        return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        struct irq_handler_data *data;
        struct ino_bucket *bucket;
        unsigned long hv_err, cookie;

        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;

        bucket->virt_irq = virt_irq_alloc(__irq(bucket));
        set_irq_chip(bucket->virt_irq, &sun4v_virq);

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data))
                return 0;

        set_irq_chip_data(bucket->virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

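        /* The cookie registered with the hypervisor is the complement of
         * the bucket's physical address.  Physical addresses never have
         * the top bit set, so the complemented value is always negative,
         * which is how the vectored trap handler in sun4v_ivec.S can tell
         * a delivered cookie apart from a plain sysino.
         */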
        cookie = ~__pa(bucket);
        hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
        if (hv_err) {
                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
                            "err=%lu\n", devhandle, devino, hv_err);
                prom_halt();
        }

        virt_to_real_irq_table[bucket->virt_irq].dev_handle = devhandle;
        virt_to_real_irq_table[bucket->virt_irq].dev_ino = devino;

        return bucket->virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = 0xdeadbeef;

        if (bucket)
                ino = bucket - &ivector_table[0];

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}

void handler_irq(int irq, struct pt_regs *regs)
{
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;

        clear_softint(1 << irq);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Grab an atomic snapshot of the pending IVECs.  */
        __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
                             "wrpr      %0, %3, %%pstate\n\t"
                             "ldx       [%2], %1\n\t"
                             "stx       %%g0, [%2]\n\t"
                             "wrpr      %0, 0x0, %%pstate\n\t"
                             : "=&r" (pstate), "=&r" (bucket_pa)
                             : "r" (irq_work_pa(smp_processor_id())),
                               "i" (PSTATE_IE)
                             : "memory");

        while (bucket_pa) {
                unsigned long next_pa;
                unsigned int virt_irq;

                /* Fetch the next-bucket link and this bucket's virt_irq
                 * via physical addresses, then clear the link.
                 */
                __asm__ __volatile__("ldxa      [%2] %4, %0\n\t"
                                     "lduwa     [%3] %4, %1\n\t"
                                     "stxa      %%g0, [%2] %4"
                                     : "=&r" (next_pa), "=&r" (virt_irq)
                                     : "r" (bucket_pa +
                                            offsetof(struct ino_bucket,
                                                     irq_chain_pa)),
                                       "r" (bucket_pa +
                                            offsetof(struct ino_bucket,
                                                     virt_irq)),
                                       "i" (ASI_PHYS_USE_EC));

                __do_IRQ(virt_irq);

                bucket_pa = next_pa;
        }

        irq_exit();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                unsigned long flags;

                spin_lock_irqsave(&irq_desc[irq].lock, flags);
                if (irq_desc[irq].action &&
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
                                        irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
}
#endif

struct sun5_timer {
        u64     count0;
        u64     limit0;
        u64     count1;
        u64     limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume if node is not present, PROM uses different tick mechanism
         * which we should not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If PROM is really using this, it must be mapped by him. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
         * We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr,
                                         unsigned long type,
                                         unsigned long qmask)
{
        /* Each queue entry is 64 bytes. */
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}

static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem_low(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem_low(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
        void *page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        page = alloc_bootmem_low_pages(PAGE_SIZE);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
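        /* The first 64 bytes of this page hold the cpu mondo data block;
         * the cpu list (one u16 per target cpu) follows immediately after,
         * which is what the BUILD_BUG_ON above is checking.
         */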
        tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
                alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
                alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
                alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
                               tb->nonresum_qmask);

                init_cpu_send_mondo_info(tb);
        }

        /* Load up the boot cpu's entries.  */
        sun4v_register_mondo_queues(hard_smp_processor_id());
}

static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        unsigned long size;

        map_prom_timers();
        kill_prom_timer();

        size = sizeof(struct ino_bucket) * NUM_IVECS;
        ivector_table = alloc_bootmem_low(size);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();
        }

        ivector_table_pa = __pa(ivector_table);

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();

        /* We need to clear any IRQ's pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr      %%pstate, %%g1\n\t"
                             "or        %%g1, %0, %%g1\n\t"
                             "wrpr      %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}