/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
static void distribute_irqs(void);
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
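/* Dispatch sketch (see handler_irq() below): the vector handler in
 * entry.S pushes each incoming bucket onto irq_work(cpu, pil); the
 * PIL-level handler later xchg()'s that list head back to zero and
 * walks the ->irq_chain links it grabbed, so producer and consumer
 * never need a lock.
 */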
static struct irqaction *irq_action[NR_IRQS+1];
/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);
static void register_irq_proc (unsigned int irq);
/*
 * Upper 2b of irqaction->flags holds the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48;  \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)
#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action)		((action)->mask)
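/* Example of the encoding above: an irqaction registered for INO 0x7e3
 * keeps its SA_* bits in the low 48 bits of ->flags and carries 0x7e3
 * in bits 48-63; the PIL0 dummy bucket stores the marker 0xdead there
 * instead.  (0x7e3 is just an illustrative value.)
 */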
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_printf(p, "\n");
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}
/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap = bucket->imap;
	unsigned long tid;

	if (imap == 0UL)
		return;

	if (tlb_type == hypervisor) {
		/* XXX SUN4V: implement me... XXX */
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32) == __JALAPENO_ID ||
			    (ver >> 32) == __SERRANO_ID) {
				/* We set it to our JBUS ID. */
				__asm__ __volatile__("ldxa [%%g0] %1, %0"
						     : "=r" (tid)
						     : "i" (ASI_JBUS_CONFIG));
				tid = ((tid & (0x1fUL<<17)) << 9);
				tid &= IMAP_TID_JBUS;
			} else {
				/* We set it to our Safari AID. */
				__asm__ __volatile__("ldxa [%%g0] %1, %0"
						     : "=r" (tid)
						     : "i" (ASI_SAFARI_CONFIG));
				tid = ((tid & (0x3ffUL<<17)) << 9);
				tid &= IMAP_AID_SAFARI;
			}
		} else if (this_is_starfire == 0) {
			/* We set it to our UPA MID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_UPA_CONFIG));
			tid = ((tid & UPA_CONFIG_MID) << 9);
		} else {
			tid = (starfire_translate(imap,
						  smp_processor_id()) << 26);
		}

		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
		 * of this SYSIO's preconfigured IGN in the SYSIO Control
		 * Register, the hardware just mirrors that value here.
		 * However for Graphics and UPA Slave devices the full
		 * IMAP_INR field can be set by the programmer here.
		 *
		 * Things like FFB can now be handled via the new IRQ
		 * mechanism.
		 */
		upa_writel(tid | IMAP_VALID, imap);
	}
}
/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap = bucket->imap;

	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}
/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct irq_desc pil0_dummy_desc;
static struct ino_bucket pil0_dummy_bucket = {
	.irq_info	= &pil0_dummy_desc,
};
static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
{
	prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
		    "(%d:%d:%016lx:%016lx), halting...\n",
		    ino, bucket->pil, bucket->iclr, bucket->imap,
		    pil, inofixup, iclr, imap);
	prom_halt();
}
unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if (pil == 0) {
		if (iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino > NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	bucket = &ivector_table[ino];
	if (bucket->flags & IBF_ACTIVE)
		build_irq_error("IRQ: Trying to build active INO bucket.\n",
				ino, pil, inofixup, iclr, imap, bucket);

	if (bucket->irq_info) {
		if (bucket->imap != imap || bucket->iclr != iclr)
			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
					ino, pil, inofixup, iclr, imap, bucket);
		goto out;
	}

	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}
	memset(bucket->irq_info, 0, sizeof(struct irq_desc));

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

out:
	return __irq(bucket);
}
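/* Chain a bucket onto this cpu's pending worklist for its PIL.  The
 * PSTATE_IE toggle below keeps the local interrupt vector handler from
 * running while the list head is updated.
 */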
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
static int check_irq_sharing(int pil, unsigned long irqflags)
{
	struct irqaction *action, *tmp;

	action = *(irq_action + pil);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			return -EBUSY;
		}
	}

	return 0;
}
static void append_irq_action(int pil, struct irqaction *action)
{
	struct irqaction **pp = irq_action + pil;

	while (*pp)
		pp = &((*pp)->next);
	*pp = action;
}
static struct irqaction *get_action_slot(struct ino_bucket *bucket)
{
	struct irq_desc *desc = bucket->irq_info;
	int max_irq, i;

	max_irq = 1;
	if (bucket->flags & IBF_PCI)
		max_irq = MAX_IRQ_DESC_ACTION;
	for (i = 0; i < max_irq; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (desc->action_active_mask & mask)
			continue;

		desc->action_active_mask |= mask;
		return p;
	}
	return NULL;
}
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if (unlikely(!handler))
		return -EINVAL;

	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (check_irq_sharing(bucket->pil, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	action = get_action_slot(bucket);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	bucket->flags |= IBF_ACTIVE;
	if (bucket != &pil0_dummy_bucket) {
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	append_irq_action(bucket->pil, action);

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (bucket != &pil0_dummy_bucket)
		register_irq_proc(__irq_ino(irq));

	return 0;
}

EXPORT_SYMBOL(request_irq);
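/* Find the irqaction matching dev_id on this bucket's PIL list and
 * unlink it; called from free_irq() with irq_action_lock held.
 */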
static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
{
	struct ino_bucket *bucket = __bucket(irq);
	struct irqaction *action, **pp;

	pp = irq_action + bucket->pil;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return NULL;
	}

	while (action && action->dev_id != dev_id) {
		pp = &action->next;
		action = *pp;
	}

	if (likely(action))
		*pp = action->next;

	return action;
}
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
	if (bucket != &pil0_dummy_bucket) {
		struct irq_desc *desc = bucket->irq_info;
		unsigned long imap = bucket->imap;
		int ent, i;

		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
			struct irqaction *p = &desc->action[i];

			if (p == action) {
				desc->action_active_mask &= ~(1 << i);
				break;
			}
		}

		if (!desc->action_active_mask) {
			/* This unique interrupt source is now inactive. */
			bucket->flags &= ~IBF_ACTIVE;

			/* See if any other buckets share this bucket's IMAP
			 * and are still active.
			 */
			for (ent = 0; ent < NUM_IVECS; ent++) {
				struct ino_bucket *bp = &ivector_table[ent];

				if (bp != bucket && bp->imap == imap &&
				    (bp->flags & IBF_ACTIVE) != 0)
					break;
			}

			/* Only disable when no other sub-irq levels of
			 * the same IMAP are active.
			 */
			if (ent == NUM_IVECS)
				disable_irq(irq);
		}
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
}

EXPORT_SYMBOL(free_irq);
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with a INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */
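/* Run every action registered on one bucket: call the optional
 * pre_handler, invoke each handler whose bit is set in
 * action_active_mask, then write ICLR_IDLE to re-arm the source and
 * mix in entropy if a handler asked for SA_SAMPLE_RANDOM.
 */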
static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
{
	struct irq_desc *desc = bp->irq_info;
	unsigned char flags = bp->flags;
	u32 action_mask;
	int random, i;

	bp->flags |= IBF_INPROGRESS;

	if (unlikely(!(flags & IBF_ACTIVE))) {
		bp->pending = 1;
		goto out;
	}

	if (desc->pre_handler)
		desc->pre_handler(bp,
				  desc->pre_handler_arg1,
				  desc->pre_handler_arg2);

	action_mask = desc->action_active_mask;
	random = 0;
	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (!(action_mask & mask))
			continue;

		action_mask &= ~mask;

		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
			random |= p->flags;

		if (!action_mask)
			break;
	}
	if (bp->pil != 0) {
		upa_writel(ICLR_IDLE, bp->iclr);
		/* Test and add entropy */
		if (random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
	}
out:
	bp->flags &= ~IBF_INPROGRESS;
}
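/* PIL-level entry point from the trap vectors: acknowledge the softint,
 * atomically grab this cpu's worklist for the PIL with xchg32(), and
 * feed each chained bucket to process_bucket().
 */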
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	while (bp) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);

		bp->irq_chain = 0;
		process_bucket(irq, bp, regs);
		bp = nbp;
	}
	irq_exit();
}
#ifdef CONFIG_BLK_DEV_FD
extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);

/* XXX No easy way to include asm/floppy.h XXX */
extern unsigned char *pdma_vaddr;
extern unsigned long pdma_size;
extern volatile int doing_pdma;
extern unsigned long fdc_status;

irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	if (likely(doing_pdma)) {
		void __iomem *stat = (void __iomem *) fdc_status;
		unsigned char *vaddr = pdma_vaddr;
		unsigned long size = pdma_size;
		u8 val;

		while (size) {
			val = readb(stat);

			if (unlikely(!(val & 0x80))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				return IRQ_HANDLED;
			}
			if (unlikely(!(val & 0x20))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				doing_pdma = 0;
				goto main_interrupt;
			}
			if (val & 0x40) {
				/* read */
				*vaddr++ = readb(stat + 1);
			} else {
				unsigned char data = *vaddr++;

				/* write */
				writeb(data, stat + 1);
			}
			size--;
		}

		pdma_vaddr = vaddr;
		pdma_size = size;

		/* Send Terminal Count pulse to floppy controller. */
		val = readb(auxio_register);
		val |= AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);
		val &= ~AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);

		doing_pdma = 0;
	}

main_interrupt:
	return floppy_interrupt(irq, dev_cookie, regs);
}
EXPORT_SYMBOL(sparc_floppy_irq);
#endif
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
	unsigned long imap = bucket->imap;
	unsigned int tid;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		tid = goal_cpu << 26;
		tid &= IMAP_AID_SAFARI;
	} else if (this_is_starfire == 0) {
		tid = goal_cpu << 26;
	} else {
		tid = (starfire_translate(imap, goal_cpu) << 26);
	}
	upa_writel(tid | IMAP_VALID, imap);

	do {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	} while (!cpu_online(goal_cpu));

	return goal_cpu;
}
/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];

		if (level == 12)
			continue;

		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
}
static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
}
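/* The sun4v hypervisor fast-trap convention assumed below: the function
 * number goes in %o5, arguments in %o0-%o2, and the status comes back
 * in %o0 (HV_EOK on success); the register variables pin the values to
 * those registers around the "ta" trap.
 */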
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
{
	register unsigned long func __asm__("%o5");
	register unsigned long arg0 __asm__("%o0");
	register unsigned long arg1 __asm__("%o1");
	register unsigned long arg2 __asm__("%o2");

	func = HV_FAST_CPU_QCONF;
	arg0 = type;
	arg1 = paddr;
	arg2 = 128;	/* XXX Implied by Niagara queue offsets. XXX */
	__asm__ __volatile__("ta	%8"
			     : "=&r" (func), "=&r" (arg0),
			       "=&r" (arg1), "=&r" (arg2)
			     : "0" (func), "1" (arg0),
			       "2" (arg1), "3" (arg2),
			       "i" (HV_FAST_TRAP)
			     : "memory");

	if (arg0 != HV_EOK) {
		prom_printf("SUN4V: cpu_qconf(%lu) failed with error %lu\n",
			    type, arg0);
		prom_halt();
	}
}
static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
}
static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}
static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	/* The first 64 bytes of the page hold the cpu mondo block, the
	 * remainder holds the cpu list (hence the BUILD_BUG_ON above).
	 */
	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
}
/* Allocate and register the mondo and error queues for this cpu. */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *tb = &trap_block[cpu];

	alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
	alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
	alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
	alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
	alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
	alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);

	init_cpu_send_mondo_info(tb, use_bootmem);

	sun4v_register_mondo_queues(cpu);
}
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues(1);

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* no outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}
static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NUM_IVECS];
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction(ap, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}
static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
}

void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}