[SPARC64]: Fix compile error in irq.c
arch/sparc64/kernel/irq.c (deliverable/linux.git)
/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>

#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send an interrupt packet to the UltraSparc with the low
 * 5 bits (7 on Starfire) of the first data register holding the IRQ
 * identifier being delivered.  We must translate this into a
 * non-vector IRQ so we can set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
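
/* irq_work(cpu, pil) evaluates to the address of that cpu's worklist
 * head for the given PIL level; entry.S chains incoming vectors onto
 * these lists and handler_irq() below empties them with xchg32().
 */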

static struct irqaction *irq_action[NR_IRQS+1];

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);

static void register_irq_proc (unsigned int irq);

/*
 * The upper 16 bits of irqaction->flags hold the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48;  \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action)		((action)->mask)
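
/* Example of the encoding done above: an action registered for ino
 * 0x1c keeps its SA_* bits in the low 48 bits of ->flags and carries
 * 0x001c in the top 16, so get_ino_in_irqaction() recovers 0x1c with
 * a single shift.
 */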

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	preempt_disable();

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);

	preempt_enable();
}

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 * and move the state to IDLE; the SCSI adapter driver code
		 * calls disable_irq() to assure atomicity of command
		 * queueing, so doing that here would lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}

/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct irq_desc pil0_dummy_desc;
static struct ino_bucket pil0_dummy_bucket = {
	.irq_info = &pil0_dummy_desc,
};

static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
{
	prom_printf("%s", msg);
	prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
		    "(%d:%d:%016lx:%016lx), halting...\n",
		    ino, bucket->pil, bucket->iclr, bucket->imap,
		    pil, inofixup, iclr, imap);
	prom_halt();
}

unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if (pil == 0) {
		if (iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino >= NUM_IVECS) {	/* valid INOs index ivector_table[] */
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	bucket = &ivector_table[ino];
	if (bucket->flags & IBF_ACTIVE)
		build_irq_error("IRQ: Trying to build active INO bucket.\n",
				ino, pil, inofixup, iclr, imap, bucket);

	if (bucket->irq_info) {
		if (bucket->imap != imap || bucket->iclr != iclr)
			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
					ino, pil, inofixup, iclr, imap, bucket);

		goto out;
	}

	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}
	memset(bucket->irq_info, 0, sizeof(struct irq_desc));

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

out:
	return __irq(bucket);
}
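
/* A minimal usage sketch (the iclr/imap register addresses and the
 * handler below are hypothetical, not taken from this file): a bus
 * driver first cooks an IRQ cookie with build_irq() and then
 * registers its handler against it:
 *
 *	unsigned int cookie = build_irq(6, 0, iclr_reg, imap_reg);
 *
 *	if (request_irq(cookie, my_handler, SA_SHIRQ, "mydev", dev))
 *		return -ENODEV;
 *
 * The cookie is __irq(bucket), an encoded pointer into
 * ivector_table[], not a small integer IRQ number.
 */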

/* Replay path used by request_irq(): chain the bucket onto this
 * cpu's worklist for its PIL, the same way the vectored-interrupt
 * code in entry.S does.
 */
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

static int check_irq_sharing(int pil, unsigned long irqflags)
{
	struct irqaction *action, *tmp;

	action = *(irq_action + pil);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			return -EBUSY;
		}
	}
	return 0;
}

static void append_irq_action(int pil, struct irqaction *action)
{
	struct irqaction **pp = irq_action + pil;

	while (*pp)
		pp = &((*pp)->next);
	*pp = action;
}

static struct irqaction *get_action_slot(struct ino_bucket *bucket)
{
	struct irq_desc *desc = bucket->irq_info;
	int max_irq, i;

	max_irq = 1;
	if (bucket->flags & IBF_PCI)
		max_irq = MAX_IRQ_DESC_ACTION;
	for (i = 0; i < max_irq; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (desc->action_active_mask & mask)
			continue;

		desc->action_active_mask |= mask;
		return p;
	}
	return NULL;
}

int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if (unlikely(!handler))
		return -EINVAL;

	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.  In the SA_STATIC_ALLOC case,
		 * the random driver's kmalloc will fail, but that is safe.
		 * If already initialized, the random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded without actually
		 * installing a new handler, but is that really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (check_irq_sharing(bucket->pil, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	action = get_action_slot(bucket);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	bucket->flags |= IBF_ACTIVE;
	pending = 0;
	if (bucket != &pil0_dummy_bucket) {
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	append_irq_action(bucket->pil, action);

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (bucket != &pil0_dummy_bucket)
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

EXPORT_SYMBOL(request_irq);

static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
{
	struct ino_bucket *bucket = __bucket(irq);
	struct irqaction *action, **pp;

	pp = irq_action + bucket->pil;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return NULL;
	}

	while (action && action->dev_id != dev_id) {
		pp = &action->next;
		action = *pp;
	}

	if (likely(action))
		*pp = action->next;

	return action;
}

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
	if (bucket != &pil0_dummy_bucket) {
		struct irq_desc *desc = bucket->irq_info;
		unsigned long imap = bucket->imap;
		int ent, i;

		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
			struct irqaction *p = &desc->action[i];

			if (p == action) {
				desc->action_active_mask &= ~(1 << i);
				break;
			}
		}

		if (!desc->action_active_mask) {
			/* This unique interrupt source is now inactive. */
			bucket->flags &= ~IBF_ACTIVE;

			/* See if any other buckets share this bucket's IMAP
			 * and are still active.
			 */
			for (ent = 0; ent < NUM_IVECS; ent++) {
				struct ino_bucket *bp = &ivector_table[ent];
				if (bp != bucket &&
				    bp->imap == imap &&
				    (bp->flags & IBF_ACTIVE) != 0)
					break;
			}

			/* Only disable when no other sub-irq levels of
			 * the same IMAP are active.
			 */
			if (ent == NUM_IVECS)
				disable_irq(irq);
		}
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
}

EXPORT_SYMBOL(free_irq);

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those varies
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with an INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */

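/* Run every action still marked active on this bucket, then write
 * ICLR_IDLE so the source may send again.  A vector that arrives
 * before the bucket is IBF_ACTIVE is recorded in ->pending and
 * replayed later by request_irq().
 */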
static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
{
	struct irq_desc *desc = bp->irq_info;
	unsigned char flags = bp->flags;
	u32 action_mask, i;
	int random;

	bp->flags |= IBF_INPROGRESS;

	if (unlikely(!(flags & IBF_ACTIVE))) {
		bp->pending = 1;
		goto out;
	}

	if (desc->pre_handler)
		desc->pre_handler(bp,
				  desc->pre_handler_arg1,
				  desc->pre_handler_arg2);

	action_mask = desc->action_active_mask;
	random = 0;
	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (!(action_mask & mask))
			continue;

		action_mask &= ~mask;

		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
			random |= p->flags;

		if (!action_mask)
			break;
	}
	if (bp->pil != 0) {
		upa_writel(ICLR_IDLE, bp->iclr);
		/* Test and add entropy */
		if (random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
	}
out:
	bp->flags &= ~IBF_INPROGRESS;
}

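/* Entry point from the PIL dispatch code: atomically snapshot and
 * clear this cpu's worklist for the given PIL with xchg32() (new
 * vectors may keep arriving meanwhile), then walk the bucket chain.
 */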
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	while (bp) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);

		bp->irq_chain = 0;
		process_bucket(irq, bp, regs);
		bp = nbp;
	}
	irq_exit();
}

#ifdef CONFIG_BLK_DEV_FD
extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);

/* XXX No easy way to include asm/floppy.h XXX */
extern unsigned char *pdma_vaddr;
extern unsigned long pdma_size;
extern volatile int doing_pdma;
extern unsigned long fdc_status;
irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	if (likely(doing_pdma)) {
		void __iomem *stat = (void __iomem *) fdc_status;
		unsigned char *vaddr = pdma_vaddr;
		unsigned long size = pdma_size;
		u8 val;

		while (size) {
			val = readb(stat);
			if (unlikely(!(val & 0x80))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				return IRQ_HANDLED;
			}
			if (unlikely(!(val & 0x20))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				doing_pdma = 0;
				goto main_interrupt;
			}
			if (val & 0x40) {
				/* read */
				*vaddr++ = readb(stat + 1);
			} else {
				unsigned char data = *vaddr++;

				/* write */
				writeb(data, stat + 1);
			}
			size--;
		}

		pdma_vaddr = vaddr;
		pdma_size = size;

		/* Send Terminal Count pulse to floppy controller. */
		val = readb(auxio_register);
		val |= AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);
		val &= ~AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);

		doing_pdma = 0;
	}

main_interrupt:
	return floppy_interrupt(irq, dev_cookie, regs);
}
EXPORT_SYMBOL(sparc_floppy_irq);
#endif

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

#ifdef CONFIG_SMP
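/* Point one irqaction's IMAP at goal_cpu (skipping offline cpus) and
 * return the next online cpu, so that distribute_irqs() below can
 * spread the registered interrupt sources round-robin.
 */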
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
	unsigned long imap = bucket->imap;
	unsigned int tid;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		tid = goal_cpu << 26;
		tid &= IMAP_AID_SAFARI;
	} else if (this_is_starfire == 0) {
		tid = goal_cpu << 26;
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, goal_cpu) << 26);
		tid &= IMAP_TID_UPA;
	}
	upa_writel(tid | IMAP_VALID, imap);

	do {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	} while (!cpu_online(goal_cpu));

	return goal_cpu;
}

/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];

		if (level == 12)
			continue;

		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must already have it mapped. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks
	 * at IRQ 14.  We turn both timers off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

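/* Called once on each cpu during startup, with PSTATE_IE off (checked
 * below): clear this cpu's worklists and point the interrupt-global
 * %g6 at them so the vectored trap handler can find its lists
 * directly in a register.
 */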
void init_irqwork_curcpu(void)
{
	register struct irq_work_struct *workp asm("o2");
	register unsigned long tmp asm("o3");
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(*workp));

	/* Make sure we are called with PSTATE_IE disabled. */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     : "=r" (tmp));
	if (tmp & PSTATE_IE) {
		prom_printf("BUG: init_irqwork_curcpu() called with "
			    "PSTATE_IE enabled, bailing.\n");
		__asm__ __volatile__("mov %%i7, %0\n\t"
				     : "=r" (tmp));
		prom_printf("BUG: Called from %lx\n", tmp);
		prom_halt();
	}

	/* Set interrupt globals. */
	workp = &__irq_work[cpu];
	__asm__ __volatile__(
	"rdpr	%%pstate, %0\n\t"
	"wrpr	%0, %1, %%pstate\n\t"
	"mov	%2, %%g6\n\t"
	"wrpr	%0, 0x0, %%pstate\n\t"
	: "=&r" (tmp)
	: "i" (PSTATE_IG), "r" (workp));
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#ifdef CONFIG_SMP

static int irq_affinity_read_proc (char *page, char **start, off_t off,
				   int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction(ap, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}

static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
				    unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}

#endif

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}