[SPARC64]: Kill ino_bucket->pil
[deliverable/linux.git] arch/sparc64/kernel/irq.c
1/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
2 * irq.c: UltraSparc IRQ handling/init/registry.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/ptrace.h>
13#include <linux/errno.h>
14#include <linux/kernel_stat.h>
15#include <linux/signal.h>
16#include <linux/mm.h>
17#include <linux/interrupt.h>
18#include <linux/slab.h>
19#include <linux/random.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/proc_fs.h>
23#include <linux/seq_file.h>
24#include <linux/bootmem.h>
25
26#include <asm/ptrace.h>
27#include <asm/processor.h>
28#include <asm/atomic.h>
29#include <asm/system.h>
30#include <asm/irq.h>
31#include <asm/io.h>
32#include <asm/sbus.h>
33#include <asm/iommu.h>
34#include <asm/upa.h>
35#include <asm/oplib.h>
36#include <asm/timer.h>
37#include <asm/smp.h>
38#include <asm/starfire.h>
39#include <asm/uaccess.h>
40#include <asm/cache.h>
41#include <asm/cpudata.h>
42#include <asm/auxio.h>
43#include <asm/head.h>
44
45#ifdef CONFIG_SMP
46static void distribute_irqs(void);
47#endif
48
49/* UPA nodes send interrupt packet to UltraSparc with first data reg
50 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
51 * delivered. We must translate this into a non-vector IRQ so we can
52 * set the softint on this cpu.
53 *
54 * To make processing these packets efficient and race free we use
55 * an array of irq buckets below. The interrupt vector handler in
56 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
57 * The IVEC handler does not need to act atomically, the PIL dispatch
58 * code uses CAS to get an atomic snapshot of the list and clear it
59 * at the same time.
60 */
61
62struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
63
64/* This has to be in the main kernel image, it cannot be
65 * turned into per-cpu data. The reason is that the main
66 * kernel image is locked into the TLB and this structure
67 * is accessed from the vectored interrupt trap handler. If
68 * access to this structure takes a TLB miss it could cause
69 * the 5-level sparc v9 trap stack to overflow.
70 */
71#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
72
73static struct irqaction *irq_action[NR_IRQS];
74
75/* This only synchronizes entities which modify IRQ handler
76 * state and some selected user-level spots that want to
77 * read things in the table. IRQ handler processing orders
78 * its accesses such that no locking is needed.
79 */
80static DEFINE_SPINLOCK(irq_action_lock);
81
82static void register_irq_proc (unsigned int irq);
83
84/*
85 * The upper 16 bits of irqaction->flags hold the ino.
86 * irqaction->mask holds the smp affinity information.
87 */
88#define put_ino_in_irqaction(action, irq) \
89 action->flags &= 0xffffffffffffUL; \
90 action->flags |= __irq_ino(irq) << 48;
91
92#define get_ino_in_irqaction(action) (action->flags >> 48)
93
94#define put_smpaff_in_irqaction(action, smpaff) (action)->mask = (smpaff)
95#define get_smpaff_in_irqaction(action) ((action)->mask)
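/* Worked example (illustrative values): for the vector at
 * ivector_table[0x23], __irq_ino() yields 0x23, so put_ino_in_irqaction()
 * clears bits 63:48 of action->flags and stores 0x23 there, and
 * get_ino_in_irqaction() recovers it with a plain right shift by 48.
 */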
96
97int show_interrupts(struct seq_file *p, void *v)
98{
99 unsigned long flags;
100 int i = *(loff_t *) v;
101 struct irqaction *action;
102#ifdef CONFIG_SMP
103 int j;
104#endif
105
106 spin_lock_irqsave(&irq_action_lock, flags);
107 if (i <= NR_IRQS) {
108 if (!(action = *(i + irq_action)))
109 goto out_unlock;
110 seq_printf(p, "%3d: ", i);
111#ifndef CONFIG_SMP
112 seq_printf(p, "%10u ", kstat_irqs(i));
113#else
114 for_each_online_cpu(j) {
115 seq_printf(p, "%10u ",
116 kstat_cpu(j).irqs[i]);
117 }
118#endif
119 seq_printf(p, " %s", action->name);
120 for (action = action->next; action; action = action->next)
121 seq_printf(p, ", %s", action->name);
122 seq_putc(p, '\n');
123 }
124out_unlock:
125 spin_unlock_irqrestore(&irq_action_lock, flags);
126
127 return 0;
128}
129
130extern unsigned long real_hard_smp_processor_id(void);
131
132static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
133{
134 unsigned int tid;
135
136 if (this_is_starfire) {
137 tid = starfire_translate(imap, cpuid);
138 tid <<= IMAP_TID_SHIFT;
139 tid &= IMAP_TID_UPA;
140 } else {
141 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
142 unsigned long ver;
143
144 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
145 if ((ver >> 32UL) == __JALAPENO_ID ||
146 (ver >> 32UL) == __SERRANO_ID) {
147 tid = cpuid << IMAP_TID_SHIFT;
148 tid &= IMAP_TID_JBUS;
149 } else {
150 unsigned int a = cpuid & 0x1f;
151 unsigned int n = (cpuid >> 5) & 0x1f;
152
153 tid = ((a << IMAP_AID_SHIFT) |
154 (n << IMAP_NID_SHIFT));
155 tid &= (IMAP_AID_SAFARI |
156 IMAP_NID_SAFARI);
157 }
158 } else {
159 tid = cpuid << IMAP_TID_SHIFT;
160 tid &= IMAP_TID_UPA;
161 }
162 }
163
164 return tid;
165}
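/* Summary: sun4u_compute_tid() builds the interrupt target-ID field that
 * callers write into a sun4u IMAP register (together with IMAP_VALID).
 * Starfire translates the cpuid through starfire_translate(); JBUS parts
 * (Jalapeno/Serrano) and plain UPA parts use the cpuid directly; other
 * Cheetah (Safari) parts split the cpuid into agent and node IDs.
 */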
166
167/* Now these are always passed a true fully specified sun4u INO. */
168void enable_irq(unsigned int irq)
169{
170 struct ino_bucket *bucket = __bucket(irq);
171 unsigned long imap, cpuid;
172
173 imap = bucket->imap;
174 if (imap == 0UL)
175 return;
176
177 preempt_disable();
178
179 /* This gets the physical processor ID, even on uniprocessor,
180 * so we can always program the interrupt target correctly.
181 */
182 cpuid = real_hard_smp_processor_id();
183
184 if (tlb_type == hypervisor) {
185 unsigned int ino = __irq_ino(irq);
186 int err;
187
188 err = sun4v_intr_settarget(ino, cpuid);
189 if (err != HV_EOK)
190 printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
191 ino, cpuid, err);
192 err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
193 if (err != HV_EOK)
194 printk("sun4v_intr_setenabled(%x): err(%d)\n",
195 ino, err);
196 } else {
197 unsigned int tid = sun4u_compute_tid(imap, cpuid);
198
199 /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
200 * of this SYSIO's preconfigured IGN in the SYSIO Control
201 * Register, the hardware just mirrors that value here.
202 * However for Graphics and UPA Slave devices the full
203 * IMAP_INR field can be set by the programmer here.
204 *
205 * Things like FFB can now be handled via the new IRQ
206 * mechanism.
207 */
208 upa_writel(tid | IMAP_VALID, imap);
209 }
210
211 preempt_enable();
212}
213
214/* This now gets passed true ino's as well. */
215void disable_irq(unsigned int irq)
216{
217 struct ino_bucket *bucket = __bucket(irq);
218 unsigned long imap;
219
220 imap = bucket->imap;
221 if (imap != 0UL) {
222 if (tlb_type == hypervisor) {
223 unsigned int ino = __irq_ino(irq);
224 int err;
225
226 err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
227 if (err != HV_EOK)
228 printk("sun4v_intr_setenabled(%x): "
229 "err(%d)\n", ino, err);
230 } else {
231 u32 tmp;
232
233 /* NOTE: We do not want to futz with the IRQ clear registers
234 * and move the state to IDLE, the SCSI code does call
235 * disable_irq() to assure atomicity in the queue cmd
236 * SCSI adapter driver code. Thus we'd lose interrupts.
237 */
238 tmp = upa_readl(imap);
239 tmp &= ~IMAP_VALID;
240 upa_writel(tmp, imap);
241 }
242 }
243}
244
245static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
246 unsigned long iclr, unsigned long imap,
247 struct ino_bucket *bucket)
248{
249 prom_printf("IRQ: INO %04x (%016lx:%016lx) --> "
250 "(%d:%016lx:%016lx), halting...\n",
251 ino, bucket->iclr, bucket->imap,
252 inofixup, iclr, imap);
253 prom_halt();
254}
255
256unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
257{
258 struct ino_bucket *bucket;
259 int ino;
260
261 BUG_ON(tlb_type == hypervisor);
262
263 /* RULE: Both must be specified in all other cases. */
264 if (iclr == 0UL || imap == 0UL) {
265 prom_printf("Invalid build_irq %d %016lx %016lx\n",
266 inofixup, iclr, imap);
267 prom_halt();
268 }
269
270 ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
271 if (ino > NUM_IVECS) {
272 prom_printf("Invalid INO %04x (%d:%016lx:%016lx)\n",
273 ino, inofixup, iclr, imap);
274 prom_halt();
275 }
276
277 bucket = &ivector_table[ino];
278 if (bucket->flags & IBF_ACTIVE)
279 build_irq_error("IRQ: Trying to build active INO bucket.\n",
280 ino, inofixup, iclr, imap, bucket);
281
282 if (bucket->irq_info) {
283 if (bucket->imap != imap || bucket->iclr != iclr)
284 build_irq_error("IRQ: Trying to reinit INO bucket.\n",
285 ino, inofixup, iclr, imap, bucket);
286
287 goto out;
288 }
289
290 bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
291 if (!bucket->irq_info) {
292 prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
293 prom_halt();
294 }
295
296 /* Ok, looks good, set it up. Don't touch the irq_chain or
297 * the pending flag.
298 */
299 bucket->imap = imap;
300 bucket->iclr = iclr;
301 bucket->flags = 0;
302
303out:
304 return __irq(bucket);
305}
306
307unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
308{
309 struct ino_bucket *bucket;
310 unsigned long sysino;
311
312 sysino = sun4v_devino_to_sysino(devhandle, devino);
313
314 bucket = &ivector_table[sysino];
315
316 /* Catch accidental accesses to these things. IMAP/ICLR handling
317 * is done by hypervisor calls on sun4v platforms, not by direct
318 * register accesses.
319 *
320 * But we need to make them look unique for the disable_irq() logic
321 * in free_irq().
322 */
323 bucket->imap = ~0UL - sysino;
324 bucket->iclr = ~0UL - sysino;
325
326 bucket->flags = flags;
327
328 bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
329 if (!bucket->irq_info) {
330 prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
331 prom_halt();
332 }
333
334 return __irq(bucket);
335}
336
337static void atomic_bucket_insert(struct ino_bucket *bucket)
338{
339 unsigned long pstate;
340 unsigned int *ent;
341
342 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
343 __asm__ __volatile__("wrpr %0, %1, %%pstate"
344 : : "r" (pstate), "i" (PSTATE_IE));
345 ent = irq_work(smp_processor_id());
346 bucket->irq_chain = *ent;
347 *ent = __irq(bucket);
348 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
349}
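/* Note: atomic_bucket_insert() runs with PSTATE_IE cleared so an incoming
 * vector trap on this cpu cannot observe the per-cpu worklist while the
 * bucket is being linked onto it; the previous PSTATE value is restored
 * once the head pointer has been updated.
 */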
350
351static int check_irq_sharing(int pil, unsigned long irqflags)
352{
353 struct irqaction *action;
354
355 action = *(irq_action + pil);
356 if (action) {
357 if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ))
358 return -EBUSY;
359 }
360 return 0;
361}
362
363static void append_irq_action(int pil, struct irqaction *action)
364{
365 struct irqaction **pp = irq_action + pil;
366
367 while (*pp)
368 pp = &((*pp)->next);
369 *pp = action;
370}
371
372static struct irqaction *get_action_slot(struct ino_bucket *bucket)
373{
374 struct irq_desc *desc = bucket->irq_info;
375 int max_irq, i;
376
377 max_irq = 1;
378 if (bucket->flags & IBF_PCI)
379 max_irq = MAX_IRQ_DESC_ACTION;
380 for (i = 0; i < max_irq; i++) {
381 struct irqaction *p = &desc->action[i];
382 u32 mask = (1 << i);
383
384 if (desc->action_active_mask & mask)
385 continue;
386
387 desc->action_active_mask |= mask;
388 return p;
389 }
390 return NULL;
391}
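/* Note: non-PCI buckets expose a single action slot, while PCI buckets
 * (IBF_PCI) can hold up to MAX_IRQ_DESC_ACTION shared actions; slot
 * occupancy is tracked per-bit in desc->action_active_mask.
 */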
392
393int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
394 unsigned long irqflags, const char *name, void *dev_id)
395{
396 struct irqaction *action;
397 struct ino_bucket *bucket = __bucket(irq);
398 unsigned long flags;
399 int pending = 0;
400
401 if (unlikely(!handler))
402 return -EINVAL;
403
404 if (unlikely(!bucket->irq_info))
405 return -ENODEV;
406
407 if (irqflags & SA_SAMPLE_RANDOM) {
408 /*
409 * This function might sleep, we want to call it first,
410 * outside of the atomic block. In SA_STATIC_ALLOC case,
411 * random driver's kmalloc will fail, but it is safe.
412 * If already initialized, random driver will not reinit.
413 * Yes, this might clear the entropy pool if the wrong
414 * driver is attempted to be loaded, without actually
415 * installing a new handler, but is this really a problem,
416 * only the sysadmin is able to do this.
417 */
418 rand_initialize_irq(PIL_DEVICE_IRQ);
419 }
420
421 spin_lock_irqsave(&irq_action_lock, flags);
422
423 if (check_irq_sharing(PIL_DEVICE_IRQ, irqflags)) {
424 spin_unlock_irqrestore(&irq_action_lock, flags);
425 return -EBUSY;
426 }
427
428 action = get_action_slot(bucket);
429 if (!action) {
430 spin_unlock_irqrestore(&irq_action_lock, flags);
431 return -ENOMEM;
432 }
433
434 bucket->flags |= IBF_ACTIVE;
435 pending = bucket->pending;
436 if (pending)
437 bucket->pending = 0;
438
439 action->handler = handler;
440 action->flags = irqflags;
441 action->name = name;
442 action->next = NULL;
443 action->dev_id = dev_id;
444 put_ino_in_irqaction(action, irq);
445 put_smpaff_in_irqaction(action, CPU_MASK_NONE);
446
447 append_irq_action(PIL_DEVICE_IRQ, action);
448
449 enable_irq(irq);
450
451 /* We ate the IVEC already, this makes sure it does not get lost. */
452 if (pending) {
453 atomic_bucket_insert(bucket);
454 set_softint(1 << PIL_DEVICE_IRQ);
455 }
456
457 spin_unlock_irqrestore(&irq_action_lock, flags);
458
459 register_irq_proc(__irq_ino(irq));
460
461#ifdef CONFIG_SMP
462 distribute_irqs();
463#endif
464 return 0;
465}
466
467EXPORT_SYMBOL(request_irq);
468
469static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
470{
471 struct irqaction *action, **pp;
472
473 pp = irq_action + PIL_DEVICE_IRQ;
474 action = *pp;
475 if (unlikely(!action))
476 return NULL;
477
478 if (unlikely(!action->handler)) {
479 printk("Freeing free IRQ %d\n", PIL_DEVICE_IRQ);
480 return NULL;
481 }
482
483 while (action && action->dev_id != dev_id) {
484 pp = &action->next;
485 action = *pp;
486 }
487
488 if (likely(action))
489 *pp = action->next;
490
491 return action;
492}
493
494void free_irq(unsigned int irq, void *dev_id)
495{
496 struct irqaction *action;
497 struct ino_bucket *bucket;
498 struct irq_desc *desc;
499 unsigned long flags;
500 int ent, i;
501
502 spin_lock_irqsave(&irq_action_lock, flags);
503
504 action = unlink_irq_action(irq, dev_id);
505
506 spin_unlock_irqrestore(&irq_action_lock, flags);
507
508 if (unlikely(!action))
509 return;
510
511 synchronize_irq(irq);
512
513 spin_lock_irqsave(&irq_action_lock, flags);
514
515 bucket = __bucket(irq);
516 desc = bucket->irq_info;
517
518 for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
519 struct irqaction *p = &desc->action[i];
520
521 if (p == action) {
522 desc->action_active_mask &= ~(1 << i);
523 break;
524 }
525 }
526
527 if (!desc->action_active_mask) {
528 unsigned long imap = bucket->imap;
529
530 /* This unique interrupt source is now inactive. */
531 bucket->flags &= ~IBF_ACTIVE;
532
533 /* See if any other buckets share this bucket's IMAP
534 * and are still active.
535 */
536 for (ent = 0; ent < NUM_IVECS; ent++) {
537 struct ino_bucket *bp = &ivector_table[ent];
538 if (bp != bucket &&
539 bp->imap == imap &&
540 (bp->flags & IBF_ACTIVE) != 0)
541 break;
542 }
543
544 /* Only disable when no other sub-irq levels of
545 * the same IMAP are active.
546 */
547 if (ent == NUM_IVECS)
548 disable_irq(irq);
549 }
550
551 spin_unlock_irqrestore(&irq_action_lock, flags);
552}
553
554EXPORT_SYMBOL(free_irq);
555
556#ifdef CONFIG_SMP
557void synchronize_irq(unsigned int irq)
558{
559 struct ino_bucket *bucket = __bucket(irq);
560
561#if 0
562 /* The following is how I wish I could implement this.
563 * Unfortunately the ICLR registers are read-only, you can
564 * only write ICLR_foo values to them. To get the current
565 * IRQ status you would need to get at the IRQ diag registers
566 * in the PCI/SBUS controller and the layout of those vary
567 * from one controller to the next, sigh... -DaveM
568 */
569 unsigned long iclr = bucket->iclr;
570
571 while (1) {
572 u32 tmp = upa_readl(iclr);
573
574 if (tmp == ICLR_TRANSMIT ||
575 tmp == ICLR_PENDING) {
576 cpu_relax();
577 continue;
578 }
579 break;
580 }
581#else
582 /* So we have to do this with a INPROGRESS bit just like x86. */
583 while (bucket->flags & IBF_INPROGRESS)
584 cpu_relax();
585#endif
586}
587#endif /* CONFIG_SMP */
588
589static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
590{
591 struct irq_desc *desc = bp->irq_info;
592 unsigned char flags = bp->flags;
593 u32 action_mask, i;
594 int random;
595
596 bp->flags |= IBF_INPROGRESS;
597
598 if (unlikely(!(flags & IBF_ACTIVE))) {
599 bp->pending = 1;
600 goto out;
601 }
602
603 if (desc->pre_handler)
604 desc->pre_handler(bp,
605 desc->pre_handler_arg1,
606 desc->pre_handler_arg2);
607
608 action_mask = desc->action_active_mask;
609 random = 0;
610 for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
611 struct irqaction *p = &desc->action[i];
612 u32 mask = (1 << i);
613
614 if (!(action_mask & mask))
615 continue;
616
617 action_mask &= ~mask;
618
619 if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
620 random |= p->flags;
621
622 if (!action_mask)
623 break;
624 }
625
626 if (tlb_type == hypervisor) {
627 unsigned int ino = __irq_ino(bp);
628 int err;
629
630 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
631 if (err != HV_EOK)
632 printk("sun4v_intr_setstate(%x): "
633 "err(%d)\n", ino, err);
634 } else {
635 upa_writel(ICLR_IDLE, bp->iclr);
636 }
637
638 /* Test and add entropy */
639 if (random & SA_SAMPLE_RANDOM)
640 add_interrupt_randomness(PIL_DEVICE_IRQ);
641out:
642 bp->flags &= ~IBF_INPROGRESS;
643}
644
645#ifndef CONFIG_SMP
646extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
647
648void timer_irq(int irq, struct pt_regs *regs)
649{
650 unsigned long clr_mask = 1 << irq;
651 unsigned long tick_mask = tick_ops->softint_mask;
652
653 if (get_softint() & tick_mask) {
654 irq = 0;
655 clr_mask = tick_mask;
656 }
657 clear_softint(clr_mask);
658
659 irq_enter();
660 kstat_this_cpu.irqs[irq]++;
661 timer_interrupt(irq, NULL, regs);
662 irq_exit();
663}
664#endif
665
666void handler_irq(int irq, struct pt_regs *regs)
667{
668 struct ino_bucket *bp;
669 int cpu = smp_processor_id();
670
671 /* XXX at this point we should be able to assert that
672 * XXX irq is PIL_DEVICE_IRQ...
673 */
674 clear_softint(1 << irq);
675
676 irq_enter();
677
678 /* Sliiiick... */
679 bp = __bucket(xchg32(irq_work(cpu), 0));
680 while (bp) {
681 struct ino_bucket *nbp = __bucket(bp->irq_chain);
682
683 kstat_this_cpu.irqs[bp->virt_irq]++;
684
685 bp->irq_chain = 0;
686 process_bucket(bp, regs);
687 bp = nbp;
688 }
689 irq_exit();
690}
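/* Note: handler_irq() grabs an atomic snapshot of this cpu's irq_worklist
 * with xchg32() and clears it in the same operation, then walks the
 * chained buckets, bumping the per-cpu interrupt statistics and handing
 * each bucket to process_bucket() after breaking its irq_chain link.
 */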
691
692#ifdef CONFIG_BLK_DEV_FD
693extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);
694
695/* XXX No easy way to include asm/floppy.h XXX */
696extern unsigned char *pdma_vaddr;
697extern unsigned long pdma_size;
698extern volatile int doing_pdma;
699extern unsigned long fdc_status;
700
701irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
702{
703 if (likely(doing_pdma)) {
704 void __iomem *stat = (void __iomem *) fdc_status;
705 unsigned char *vaddr = pdma_vaddr;
706 unsigned long size = pdma_size;
707 u8 val;
708
709 while (size) {
710 val = readb(stat);
711 if (unlikely(!(val & 0x80))) {
712 pdma_vaddr = vaddr;
713 pdma_size = size;
714 return IRQ_HANDLED;
715 }
716 if (unlikely(!(val & 0x20))) {
717 pdma_vaddr = vaddr;
718 pdma_size = size;
719 doing_pdma = 0;
720 goto main_interrupt;
721 }
722 if (val & 0x40) {
723 /* read */
724 *vaddr++ = readb(stat + 1);
725 } else {
726 unsigned char data = *vaddr++;
727
728 /* write */
729 writeb(data, stat + 1);
730 }
731 size--;
732 }
733
734 pdma_vaddr = vaddr;
735 pdma_size = size;
736
737 /* Send Terminal Count pulse to floppy controller. */
738 val = readb(auxio_register);
739 val |= AUXIO_AUX1_FTCNT;
740 writeb(val, auxio_register);
741 val &= ~AUXIO_AUX1_FTCNT;
742 writeb(val, auxio_register);
743
744 doing_pdma = 0;
745 }
746
747main_interrupt:
748 return floppy_interrupt(irq, dev_cookie, regs);
749}
750EXPORT_SYMBOL(sparc_floppy_irq);
751#endif
752
753/* We really don't need these at all on the Sparc. We only have
754 * stubs here because they are exported to modules.
755 */
756unsigned long probe_irq_on(void)
757{
758 return 0;
759}
760
761EXPORT_SYMBOL(probe_irq_on);
762
763int probe_irq_off(unsigned long mask)
764{
765 return 0;
766}
767
768EXPORT_SYMBOL(probe_irq_off);
769
770#ifdef CONFIG_SMP
771static int retarget_one_irq(struct irqaction *p, int goal_cpu)
772{
773 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
774
775 while (!cpu_online(goal_cpu)) {
776 if (++goal_cpu >= NR_CPUS)
777 goal_cpu = 0;
778 }
779
780 if (tlb_type == hypervisor) {
781 unsigned int ino = __irq_ino(bucket);
782
783 sun4v_intr_settarget(ino, goal_cpu);
784 sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
785 } else {
786 unsigned long imap = bucket->imap;
787 unsigned int tid = sun4u_compute_tid(imap, goal_cpu);
788
789 upa_writel(tid | IMAP_VALID, imap);
790 }
791
792 do {
793 if (++goal_cpu >= NR_CPUS)
794 goal_cpu = 0;
795 } while (!cpu_online(goal_cpu));
796
797 return goal_cpu;
798}
799
800/* Called from request_irq. */
801static void distribute_irqs(void)
802{
803 unsigned long flags;
804 int cpu, level;
805
806 spin_lock_irqsave(&irq_action_lock, flags);
807 cpu = 0;
808
809 for (level = 1; level < NR_IRQS; level++) {
810 struct irqaction *p = irq_action[level];
811
812 while(p) {
813 cpu = retarget_one_irq(p, cpu);
814 p = p->next;
815 }
816 }
817 spin_unlock_irqrestore(&irq_action_lock, flags);
818}
819#endif
820
821struct sun5_timer {
822 u64 count0;
823 u64 limit0;
824 u64 count1;
825 u64 limit1;
826};
827
828static struct sun5_timer *prom_timers;
829static u64 prom_limit0, prom_limit1;
830
831static void map_prom_timers(void)
832{
833 unsigned int addr[3];
834 int tnode, err;
835
836 /* PROM timer node hangs out in the top level of device siblings... */
837 tnode = prom_finddevice("/counter-timer");
838
839 /* Assume if node is not present, PROM uses different tick mechanism
840 * which we should not care about.
841 */
842 if (tnode == 0 || tnode == -1) {
843 prom_timers = (struct sun5_timer *) 0;
844 return;
845 }
846
847 /* If PROM is really using this, it must be mapped by him. */
848 err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
849 if (err == -1) {
850 prom_printf("PROM does not have timer mapped, trying to continue.\n");
851 prom_timers = (struct sun5_timer *) 0;
852 return;
853 }
854 prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
855}
856
857static void kill_prom_timer(void)
858{
859 if (!prom_timers)
860 return;
861
862 /* Save them away for later. */
863 prom_limit0 = prom_timers->limit0;
864 prom_limit1 = prom_timers->limit1;
865
866 /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
867 * We turn both off here just to be paranoid.
868 */
869 prom_timers->limit0 = 0;
870 prom_timers->limit1 = 0;
871
872 /* Wheee, eat the interrupt packet too... */
873 __asm__ __volatile__(
874" mov 0x40, %%g2\n"
875" ldxa [%%g0] %0, %%g1\n"
876" ldxa [%%g2] %1, %%g1\n"
877" stxa %%g0, [%%g0] %0\n"
878" membar #Sync\n"
879 : /* no outputs */
880 : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
881 : "g1", "g2");
882}
883
884void init_irqwork_curcpu(void)
885{
886 int cpu = hard_smp_processor_id();
887
888 trap_block[cpu].irq_worklist = 0;
889}
890
891static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
892{
893 unsigned long num_entries = 128;
894 unsigned long status;
895
896 status = sun4v_cpu_qconf(type, paddr, num_entries);
897 if (status != HV_EOK) {
898 prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
899 "err %lu\n", type, paddr, num_entries, status);
900 prom_halt();
901 }
902}
903
904static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
905{
906 struct trap_per_cpu *tb = &trap_block[this_cpu];
907
908 register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
909 register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
910 register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
911 register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
912}
913
914static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
915{
916 void *page;
917
918 if (use_bootmem)
919 page = alloc_bootmem_low_pages(PAGE_SIZE);
920 else
921 page = (void *) get_zeroed_page(GFP_ATOMIC);
922
923 if (!page) {
924 prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
925 prom_halt();
926 }
927
928 *pa_ptr = __pa(page);
929}
930
931static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
932{
933 void *page;
934
935 if (use_bootmem)
936 page = alloc_bootmem_low_pages(PAGE_SIZE);
937 else
938 page = (void *) get_zeroed_page(GFP_ATOMIC);
939
940 if (!page) {
941 prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
942 prom_halt();
943 }
944
945 *pa_ptr = __pa(page);
946}
947
948static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
949{
950#ifdef CONFIG_SMP
951 void *page;
952
953 BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
954
955 if (use_bootmem)
956 page = alloc_bootmem_low_pages(PAGE_SIZE);
957 else
958 page = (void *) get_zeroed_page(GFP_ATOMIC);
959
960 if (!page) {
961 prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
962 prom_halt();
963 }
964
965 tb->cpu_mondo_block_pa = __pa(page);
966 tb->cpu_list_pa = __pa(page + 64);
967#endif
968}
969
970/* Allocate and register the mondo and error queues for this cpu. */
971void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
972{
973 struct trap_per_cpu *tb = &trap_block[cpu];
974
975 if (alloc) {
976 alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
977 alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
978 alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
979 alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
980 alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
981 alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
982
983 init_cpu_send_mondo_info(tb, use_bootmem);
984 }
985
986 if (load) {
987 if (cpu != hard_smp_processor_id()) {
988 prom_printf("SUN4V: init mondo on cpu %d not %d\n",
989 cpu, hard_smp_processor_id());
990 prom_halt();
991 }
992 sun4v_register_mondo_queues(cpu);
993 }
994}
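/* Note: the sun4v mondo and error queues are allocated from bootmem during
 * early boot (use_bootmem != 0) or from GFP_ATOMIC pages for cpus brought
 * up later, and are registered with the hypervisor via sun4v_cpu_qconf();
 * the "load" step must run on the cpu that owns the queues, which is why a
 * cpu mismatch above is fatal (prom_halt()).
 */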
995
996/* Only invoked on boot processor. */
997void __init init_IRQ(void)
998{
999 map_prom_timers();
1000 kill_prom_timer();
1001 memset(&ivector_table[0], 0, sizeof(ivector_table));
1002
1003 if (tlb_type == hypervisor)
1004 sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);
1005
1006 /* We need to clear any IRQ's pending in the soft interrupt
1007 * registers, a spurious one could be left around from the
1008 * PROM timer which we just disabled.
1009 */
1010 clear_softint(get_softint());
1011
1012 /* Now that ivector table is initialized, it is safe
1013 * to receive IRQ vector traps. We will normally take
1014 * one or two right now, in case some device PROM used
1015 * to boot us wants to speak to us. We just ignore them.
1016 */
1017 __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
1018 "or %%g1, %0, %%g1\n\t"
1019 "wrpr %%g1, 0x0, %%pstate"
1020 : /* No outputs */
1021 : "i" (PSTATE_IE)
1022 : "g1");
1023}
1024
1025static struct proc_dir_entry * root_irq_dir;
1026static struct proc_dir_entry * irq_dir [NUM_IVECS];
1027
1028#ifdef CONFIG_SMP
1029
1030static int irq_affinity_read_proc (char *page, char **start, off_t off,
1031 int count, int *eof, void *data)
1032{
1033 struct ino_bucket *bp = ivector_table + (long)data;
1034 struct irq_desc *desc = bp->irq_info;
1035 struct irqaction *ap = desc->action;
1036 cpumask_t mask;
1037 int len;
1038
1039 mask = get_smpaff_in_irqaction(ap);
1040 if (cpus_empty(mask))
1041 mask = cpu_online_map;
1042
1043 len = cpumask_scnprintf(page, count, mask);
1044 if (count - len < 2)
1045 return -EINVAL;
1046 len += sprintf(page + len, "\n");
1047 return len;
1048}
1049
1050static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
1051{
1052 struct ino_bucket *bp = ivector_table + irq;
1053 struct irq_desc *desc = bp->irq_info;
1054 struct irqaction *ap = desc->action;
1055
1056 /* Users specify affinity in terms of hw cpu ids.
1057 * As soon as we do this, handler_irq() might see and take action.
1058 */
1059 put_smpaff_in_irqaction(ap, hw_aff);
1060
1061 /* Migration is simply done by the next cpu to service this
1062 * interrupt.
1063 */
1064}
1065
1066static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
1067 unsigned long count, void *data)
1068{
1069 int irq = (long) data, full_count = count, err;
1070 cpumask_t new_value;
1071
1072 err = cpumask_parse(buffer, count, new_value);
1073
1074 /*
1075 * Do not allow disabling IRQs completely - it's a too easy
1076 * way to make the system unusable accidentally :-) At least
1077 * one online CPU still has to be targeted.
1078 */
1079 cpus_and(new_value, new_value, cpu_online_map);
1080 if (cpus_empty(new_value))
1081 return -EINVAL;
1082
1083 set_intr_affinity(irq, new_value);
1084
1085 return full_count;
1086}
1087
1088#endif
1089
1090#define MAX_NAMELEN 10
1091
1092static void register_irq_proc (unsigned int irq)
1093{
1094 char name [MAX_NAMELEN];
1095
1096 if (!root_irq_dir || irq_dir[irq])
1097 return;
1098
1099 memset(name, 0, MAX_NAMELEN);
1100 sprintf(name, "%x", irq);
1101
1102 /* create /proc/irq/1234 */
1103 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1104
1105#ifdef CONFIG_SMP
1106 /* XXX SMP affinity not supported on starfire yet. */
1107 if (this_is_starfire == 0) {
1108 struct proc_dir_entry *entry;
1109
1110 /* create /proc/irq/1234/smp_affinity */
1111 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1112
1113 if (entry) {
1114 entry->nlink = 1;
1115 entry->data = (void *)(long)irq;
1116 entry->read_proc = irq_affinity_read_proc;
1117 entry->write_proc = irq_affinity_write_proc;
1118 }
1119 }
1120#endif
1121}
1122
1123void init_irq_proc (void)
1124{
1125 /* create /proc/irq */
1126 root_irq_dir = proc_mkdir("irq", NULL);
1127}
1128