[PATCH] sparc64 pt_regs fixes
arch/sparc64/kernel/irq.c
/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>

/* UPA nodes send an interrupt packet to the UltraSparc with the first
 * data register's low 5 bits (7 on Starfire) holding the IRQ identifier
 * being delivered.  We must translate this into a non-vector IRQ so we
 * can set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 *
 * If you make changes to ino_bucket, please update the hand coded
 * assembler of the vectored interrupt trap handler(s) in entry.S and
 * sun4v_ivec.S.
 */
struct ino_bucket {
        /* Next handler in per-CPU IRQ worklist.  We know that
         * bucket pointers have the high 32-bits clear, so to
         * save space we only store the bits we need.
         */
        /*0x00*/unsigned int irq_chain;

        /* Virtual interrupt number assigned to this INO. */
        /*0x04*/unsigned int virt_irq;
};

#define NUM_IVECS (IMAP_INR + 1)
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

#define __irq_ino(irq) \
        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
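
/* A quick note on the three helpers above, with a hypothetical example
 * for clarity: a "real" irq is just an ino_bucket pointer truncated to
 * 32 bits, which is safe because bucket pointers have their high 32
 * bits clear (see the struct comment).  Given some bucket pointer bp:
 *
 *      unsigned int real_irq = __irq(bp);            - pointer to cookie
 *      struct ino_bucket *bp2 = __bucket(real_irq);  - and back again
 *      int ino = __irq_ino(real_irq);                - index in ivector_table
 *
 * bp2 == bp, and ino is the INO value the hardware delivered.
 */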

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)

static unsigned int virt_to_real_irq_table[NR_IRQS];
static unsigned char virt_irq_cur = 1;

static unsigned char virt_irq_alloc(unsigned int real_irq)
{
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        ent = virt_irq_cur;
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                return 0;
        }

        virt_irq_cur = ent + 1;
        virt_to_real_irq_table[ent] = real_irq;

        return ent;
}
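
/* Note that this is a simple bump allocator: virtual IRQ numbers are
 * never freed, virt_irq_cur starts at 1 so that 0 can serve as the
 * "not assigned" sentinel, and 0 is also what callers get back once
 * the table is exhausted.
 */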

#if 0 /* Currently unused. */
static unsigned char real_to_virt_irq(unsigned int real_irq)
{
        struct ino_bucket *bucket = __bucket(real_irq);

        return bucket->virt_irq;
}
#endif

static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
        return virt_to_real_irq_table[virt_irq];
}

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, "  %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
        return 0;
}

extern unsigned long real_hard_smp_processor_id(void);

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}
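
/* Worked example for the Safari case above (the cpuid value is
 * hypothetical): cpuid 0x2f yields agent id a = 0x2f & 0x1f = 0xf and
 * node id n = (0x2f >> 5) & 0x1f = 0x1, which are then packed into the
 * interrupt target id via IMAP_AID_SHIFT and IMAP_NID_SHIFT.
 */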

struct irq_handler_data {
        unsigned long   iclr;
        unsigned long   imap;

        void            (*pre_handler)(unsigned int, void *, void *);
        void            *pre_handler_arg1;
        void            *pre_handler_arg2;
};

static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
        unsigned int real_irq = virt_to_real_irq(virt_irq);
        struct ino_bucket *bucket = NULL;

        if (likely(real_irq))
                bucket = __bucket(real_irq);

        return bucket;
}

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
        cpumask_t mask = irq_desc[virt_irq].affinity;
        int cpuid;

        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
        do_round_robin:
                spin_lock_irqsave(&irq_rover_lock, flags);

                while (!cpu_online(irq_rover)) {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));

                spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);

                if (cpus_empty(tmp))
                        goto do_round_robin;

                cpuid = first_cpu(tmp);
        }

        return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
        return real_hard_smp_processor_id();
}
#endif

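/* The sun4u chip methods below program the two per-source interrupt
 * registers directly: enable computes a target id for the chosen cpu
 * and writes it with IMAP_VALID set into the IMAP register, disable
 * clears IMAP_VALID, and end writes ICLR_IDLE to the ICLR register so
 * the source may send again.
 */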
static void sun4u_irq_enable(unsigned int virt_irq)
{
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        if (likely(data)) {
                unsigned long cpuid, imap;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                upa_writel(tid | IMAP_VALID, imap);
        }
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        if (likely(data)) {
                unsigned long imap = data->imap;
                u32 tmp = upa_readl(imap);

                tmp &= ~IMAP_VALID;
                upa_writel(tmp, imap);
        }
}

static void sun4u_irq_end(unsigned int virt_irq)
{
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        if (likely(data))
                upa_writel(ICLR_IDLE, data->iclr);
}

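/* On sun4v the same three operations become hypervisor calls keyed by
 * the sysino (the bucket's index in ivector_table) instead of direct
 * register accesses: settarget+setenabled on enable, setenabled on
 * disable, and setstate(HV_INTR_STATE_IDLE) on end.
 */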
static void sun4v_irq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
                               ino, cpuid, err);
                err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk("sun4v_intr_setenabled(%x): err(%d)\n",
                               ino, err);
        }
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk("sun4v_intr_setenabled(%x): "
                               "err(%d)\n", ino, err);
        }
}

static void sun4v_irq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk("sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
        }
}

static void run_pre_handler(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        if (likely(data->pre_handler)) {
                data->pre_handler(__irq_ino(__irq(bucket)),
                                  data->pre_handler_arg1,
                                  data->pre_handler_arg2);
        }
}

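/* Two chip variants per platform: the "+ack" flavors additionally run
 * the installed pre-handler from the ->ack hook.  A descriptor is
 * switched over to the matching "+ack" chip when a pre-handler is
 * installed, see irq_install_pre_handler() below.
 */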
static struct hw_interrupt_type sun4u_irq = {
        .typename       = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .end            = sun4u_irq_end,
};

static struct hw_interrupt_type sun4u_irq_ack = {
        .typename       = "sun4u+ack",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .ack            = run_pre_handler,
        .end            = sun4u_irq_end,
};

static struct hw_interrupt_type sun4v_irq = {
        .typename       = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .end            = sun4v_irq_end,
};

static struct hw_interrupt_type sun4v_irq_ack = {
        .typename       = "sun4v+ack",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .ack            = run_pre_handler,
        .end            = sun4v_irq_end,
};

void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        data->pre_handler = func;
        data->pre_handler_arg1 = arg1;
        data->pre_handler_arg2 = arg2;

        if (desc->chip == &sun4u_irq_ack ||
            desc->chip == &sun4v_irq_ack)
                return;

        desc->chip = (desc->chip == &sun4u_irq ?
                      &sun4u_irq_ack : &sun4v_irq_ack);
}

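/* Rough usage sketch for the two builders below (the driver and the
 * register values are hypothetical): a sun4u bus driver reads the
 * ICLR/IMAP register addresses out of the device node, calls
 * build_irq(inofixup, iclr, imap), and passes the returned virtual irq
 * number to request_irq().  A sun4v driver instead supplies its
 * (devhandle, devino) pair to sun4v_build_irq().  Both builders are
 * idempotent for an INO that is already set up: they simply return the
 * existing virt_irq.
 */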
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        irq_desc_t *desc;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                irq_desc[bucket->virt_irq].chip = &sun4u_irq;
        }

        desc = irq_desc + bucket->virt_irq;
        if (unlikely(desc->handler_data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        desc->handler_data = data;

        data->imap = imap;
        data->iclr = iclr;

out:
        return bucket->virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned long sysino;
        irq_desc_t *desc;

        BUG_ON(tlb_type != hypervisor);

        sysino = sun4v_devino_to_sysino(devhandle, devino);
        bucket = &ivector_table[sysino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                irq_desc[bucket->virt_irq].chip = &sun4v_irq;
        }

        desc = irq_desc + bucket->virt_irq;
        if (unlikely(desc->handler_data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        desc->handler_data = data;

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return bucket->virt_irq;
}

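/* Resend a pending irq: with interrupts disabled in %pstate, chain the
 * irq's bucket onto this cpu's worklist and raise the device softint,
 * so that handler_irq() below replays it as if a fresh vector packet
 * had just arrived.
 */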
void hw_resend_irq(struct hw_interrupt_type *handler, unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned long pstate;
        unsigned int *ent;

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        __asm__ __volatile__("wrpr %0, %1, %%pstate"
                             : : "r" (pstate), "i" (PSTATE_IE));
        ent = irq_work(smp_processor_id());
        bucket->irq_chain = *ent;
        *ent = __irq(bucket);
        set_softint(1 << PIL_DEVICE_IRQ);
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

void ack_bad_irq(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = 0xdeadbeef;

        if (bucket)
                ino = bucket - &ivector_table[0];

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}

#ifndef CONFIG_SMP
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);

void timer_irq(int irq, struct pt_regs *regs)
{
        unsigned long clr_mask = 1 << irq;
        unsigned long tick_mask = tick_ops->softint_mask;

        if (get_softint() & tick_mask) {
                irq = 0;
                clr_mask = tick_mask;
        }
        clear_softint(clr_mask);

        irq_enter();

        kstat_this_cpu.irqs[0]++;
        timer_interrupt(irq, NULL, regs);

        irq_exit();
}
#endif

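/* Device interrupt entry point, invoked from the low-level vector
 * code once the softint fires.  The xchg32() below both snapshots and
 * clears this cpu's worklist in one atomic step (the scheme described
 * in the comment at the top of this file); we then walk the chain and
 * hand each bucket's virt_irq to the generic __do_IRQ() layer.
 */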
void handler_irq(int irq, struct pt_regs *regs)
{
        struct ino_bucket *bucket;
        struct pt_regs *old_regs;

        clear_softint(1 << irq);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Sliiiick... */
        bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
        while (bucket) {
                struct ino_bucket *next = __bucket(bucket->irq_chain);

                bucket->irq_chain = 0;
                __do_IRQ(bucket->virt_irq);

                bucket = next;
        }

        irq_exit();
        set_irq_regs(old_regs);
}

struct sun5_timer {
        u64     count0;
        u64     limit0;
        u64     count1;
        u64     limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume that if the node is not present, the PROM uses a
         * different tick mechanism which we need not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If the PROM is really using this, it must already have
         * mapped it.
         */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as on sun4c/sun4m, the PROM uses a timer which ticks
         * at IRQ 14.  We turn both timers off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist = 0;
}

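/* sun4v delivers cpu mondos, device mondos, and resumable/non-resumable
 * error reports through per-cpu queues whose physical addresses must be
 * registered with the hypervisor.  The helpers below allocate the
 * backing pages (from bootmem early in boot, GFP_ATOMIC afterwards) and
 * perform the registration via sun4v_cpu_qconf().
 */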
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
{
        unsigned long num_entries = 128;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
}

static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
{
        void *page;

        if (use_bootmem)
                page = alloc_bootmem_low_pages(PAGE_SIZE);
        else
                page = (void *) get_zeroed_page(GFP_ATOMIC);

        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(page);
}

static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
{
        void *page;

        if (use_bootmem)
                page = alloc_bootmem_low_pages(PAGE_SIZE);
        else
                page = (void *) get_zeroed_page(GFP_ATOMIC);

        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(page);
}

static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
#ifdef CONFIG_SMP
        void *page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        if (use_bootmem)
                page = alloc_bootmem_low_pages(PAGE_SIZE);
        else
                page = (void *) get_zeroed_page(GFP_ATOMIC);

        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate and register the mondo and error queues for this cpu. */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
{
        struct trap_per_cpu *tb = &trap_block[cpu];

        if (alloc) {
                alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
                alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
                alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
                alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
                alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);

                init_cpu_send_mondo_info(tb, use_bootmem);
        }

        if (load) {
                if (cpu != hard_smp_processor_id()) {
                        prom_printf("SUN4V: init mondo on cpu %d not %d\n",
                                    cpu, hard_smp_processor_id());
                        prom_halt();
                }
                sun4v_register_mondo_queues(cpu);
        }
}
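
/* Note that queue registration must run on the cpu owning the queues,
 * hence the hard_smp_processor_id() check above.  The boot cpu does
 * alloc+load in one shot from init_IRQ() below; secondary cpus
 * presumably split the alloc and load phases across the SMP bringup
 * path.
 */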

static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        map_prom_timers();
        kill_prom_timer();
        memset(&ivector_table[0], 0, sizeof(ivector_table));

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

        /* We need to clear any IRQs pending in the soft interrupt
         * registers; a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that the ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
                             "or %%g1, %0, %%g1\n\t"
                             "wrpr %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}