Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* $Id: irq.c,v 1.114 2001/12/11 04:55:51 davem Exp $ |
2 | * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the | |
3 | * Sparc the IRQ's are basically 'cast in stone' | |
4 | * and you are supposed to probe the prom's device | |
5 | * node trees to find out who's got which IRQ. | |
6 | * | |
7 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | |
8 | * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) | |
9 | * Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com) | |
10 | * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) | |
11 | * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org) | |
12 | */ | |
13 | ||
14 | #include <linux/config.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/sched.h> | |
17 | #include <linux/ptrace.h> | |
18 | #include <linux/errno.h> | |
19 | #include <linux/linkage.h> | |
20 | #include <linux/kernel_stat.h> | |
21 | #include <linux/signal.h> | |
22 | #include <linux/sched.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/random.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/smp.h> | |
28 | #include <linux/smp_lock.h> | |
29 | #include <linux/delay.h> | |
30 | #include <linux/threads.h> | |
31 | #include <linux/spinlock.h> | |
32 | #include <linux/seq_file.h> | |
33 | ||
34 | #include <asm/ptrace.h> | |
35 | #include <asm/processor.h> | |
36 | #include <asm/system.h> | |
37 | #include <asm/psr.h> | |
38 | #include <asm/smp.h> | |
39 | #include <asm/vaddrs.h> | |
40 | #include <asm/timer.h> | |
41 | #include <asm/openprom.h> | |
42 | #include <asm/oplib.h> | |
43 | #include <asm/traps.h> | |
44 | #include <asm/irq.h> | |
45 | #include <asm/io.h> | |
46 | #include <asm/pgalloc.h> | |
47 | #include <asm/pgtable.h> | |
48 | #include <asm/pcic.h> | |
49 | #include <asm/cacheflush.h> | |
50 | ||
51 | #ifdef CONFIG_SMP | |
52 | #define SMP_NOP2 "nop; nop;\n\t" | |
53 | #define SMP_NOP3 "nop; nop; nop;\n\t" | |
54 | #else | |
55 | #define SMP_NOP2 | |
56 | #define SMP_NOP3 | |
57 | #endif /* SMP */ | |
58 | unsigned long __local_irq_save(void) | |
59 | { | |
60 | unsigned long retval; | |
61 | unsigned long tmp; | |
62 | ||
63 | __asm__ __volatile__( | |
64 | "rd %%psr, %0\n\t" | |
65 | SMP_NOP3 /* Sun4m + Cypress + SMP bug */ | |
66 | "or %0, %2, %1\n\t" | |
67 | "wr %1, 0, %%psr\n\t" | |
68 | "nop; nop; nop\n" | |
69 | : "=&r" (retval), "=r" (tmp) | |
70 | : "i" (PSR_PIL) | |
71 | : "memory"); | |
72 | ||
73 | return retval; | |
74 | } | |
75 | ||
76 | void local_irq_enable(void) | |
77 | { | |
78 | unsigned long tmp; | |
79 | ||
80 | __asm__ __volatile__( | |
81 | "rd %%psr, %0\n\t" | |
82 | SMP_NOP3 /* Sun4m + Cypress + SMP bug */ | |
83 | "andn %0, %1, %0\n\t" | |
84 | "wr %0, 0, %%psr\n\t" | |
85 | "nop; nop; nop\n" | |
86 | : "=&r" (tmp) | |
87 | : "i" (PSR_PIL) | |
88 | : "memory"); | |
89 | } | |
90 | ||
91 | void local_irq_restore(unsigned long old_psr) | |
92 | { | |
93 | unsigned long tmp; | |
94 | ||
95 | __asm__ __volatile__( | |
96 | "rd %%psr, %0\n\t" | |
97 | "and %2, %1, %2\n\t" | |
98 | SMP_NOP2 /* Sun4m + Cypress + SMP bug */ | |
99 | "andn %0, %1, %0\n\t" | |
100 | "wr %0, %2, %%psr\n\t" | |
101 | "nop; nop; nop\n" | |
102 | : "=&r" (tmp) | |
103 | : "i" (PSR_PIL), "r" (old_psr) | |
104 | : "memory"); | |
105 | } | |
106 | ||
107 | EXPORT_SYMBOL(__local_irq_save); | |
108 | EXPORT_SYMBOL(local_irq_enable); | |
109 | EXPORT_SYMBOL(local_irq_restore); | |
110 | ||
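The sparc32 local-interrupt primitives above work by raising and restoring the PIL field of the PSR. A minimal sketch of the usual caller-side pattern, assuming the generic local_irq_save()/local_irq_restore() macros map onto the routines above; the counter and function name are hypothetical:

```c
/* Sketch: assumes local_irq_restore() wraps the routine defined above;
 * shared_count and bump_shared_count() are hypothetical names.
 */
static int shared_count;

void bump_shared_count(void)
{
	unsigned long flags;

	flags = __local_irq_save();	/* raise PIL: no local interrupts now */
	shared_count++;			/* short critical section */
	local_irq_restore(flags);	/* put the caller's PIL bits back */
}
```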
111 | /* | |
112 | * Dave Redman (djhr@tadpole.co.uk) | |
113 | * | |
114 | * IRQ numbers.. These are no longer restricted to 15.. | |
115 | * | |
116 | * This is done so that SBUS cards and on-board I/O can be masked | |
117 | * correctly; using the interrupt level alone isn't good enough. | |
118 | * | |
119 | * For example: | |
120 | * A device interrupting at sbus level6 and the Floppy both come in | |
121 | * at IRQ11, but enabling and disabling them requires writing to | |
122 | * different bits in the SLAVIO/SEC. | |
123 | * | |
124 | * As a result of these changes sun4m machines could now support | |
125 | * directed CPU interrupts using the existing enable/disable irq code | |
126 | * with tweaks. | |
127 | * | |
128 | */ | |
129 | ||
130 | static void irq_panic(void) | |
131 | { | |
132 | extern char *cputypval; | |
133 | prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval); | |
134 | prom_halt(); | |
135 | } | |
136 | ||
137 | void (*sparc_init_timers)(irqreturn_t (*)(int, void *,struct pt_regs *)) = | |
138 | (void (*)(irqreturn_t (*)(int, void *,struct pt_regs *))) irq_panic; | |
139 | ||
140 | /* | |
141 | * Dave Redman (djhr@tadpole.co.uk) | |
142 | * | |
143 | * There used to be extern calls and hard coded values here.. very sucky! | |
144 | * instead, because some of the devices attach very early, I do something | |
145 | * equally sucky but at least we'll never try to free statically allocated | |
146 | * space or call kmalloc before kmalloc_init :(. | |
147 | * | |
148 | * In fact it's the timer10 that attaches first.. then timer14 | |
149 | * then kmalloc_init is called.. then the tty interrupts attach. | |
150 | * hmmm.... | |
151 | * | |
152 | */ | |
153 | #define MAX_STATIC_ALLOC 4 | |
154 | struct irqaction static_irqaction[MAX_STATIC_ALLOC]; | |
155 | int static_irq_count; | |
156 | ||
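To make the allocation story above concrete, a hedged sketch of the kind of early registration the static pool exists for; the handler, IRQ number and names are hypothetical stand-ins for the real timer attach code, and request_irq() is the routine defined further down in this file:

```c
/* Sketch: early_timer_interrupt() and EARLY_TIMER_IRQ are hypothetical.
 * SA_STATIC_ALLOC makes request_irq() take the irqaction from
 * static_irqaction[] instead of calling kmalloc(), which is not yet
 * initialised this early in boot.
 */
static irqreturn_t early_timer_interrupt(int irq, void *dev_id,
					 struct pt_regs *regs)
{
	/* ... acknowledge the timer tick ... */
	return IRQ_HANDLED;
}

void __init attach_early_timer(void)
{
	request_irq(EARLY_TIMER_IRQ, early_timer_interrupt,
		    SA_INTERRUPT | SA_STATIC_ALLOC, "timer", NULL);
}
```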
157 | struct irqaction *irq_action[NR_IRQS] = { | |
158 | [0 ... (NR_IRQS-1)] = NULL | |
159 | }; | |
160 | ||
161 | /* Used to protect the IRQ action lists */ | |
162 | DEFINE_SPINLOCK(irq_action_lock); | |
163 | ||
164 | int show_interrupts(struct seq_file *p, void *v) | |
165 | { | |
166 | int i = *(loff_t *) v; | |
167 | struct irqaction * action; | |
168 | unsigned long flags; | |
169 | #ifdef CONFIG_SMP | |
170 | int j; | |
171 | #endif | |
172 | ||
173 | if (sparc_cpu_model == sun4d) { | |
174 | extern int show_sun4d_interrupts(struct seq_file *, void *); | |
175 | ||
176 | return show_sun4d_interrupts(p, v); | |
177 | } | |
178 | spin_lock_irqsave(&irq_action_lock, flags); | |
179 | if (i < NR_IRQS) { | |
180 | action = *(i + irq_action); | |
181 | if (!action) | |
182 | goto out_unlock; | |
183 | seq_printf(p, "%3d: ", i); | |
184 | #ifndef CONFIG_SMP | |
185 | seq_printf(p, "%10u ", kstat_irqs(i)); | |
186 | #else | |
187 | for (j = 0; j < NR_CPUS; j++) { | |
188 | if (cpu_online(j)) | |
189 | seq_printf(p, "%10u ", | |
190 | kstat_cpu(cpu_logical_map(j)).irqs[i]); | |
191 | } | |
192 | #endif | |
193 | seq_printf(p, " %c %s", | |
194 | (action->flags & SA_INTERRUPT) ? '+' : ' ', | |
195 | action->name); | |
196 | for (action=action->next; action; action = action->next) { | |
197 | seq_printf(p, ",%s %s", | |
198 | (action->flags & SA_INTERRUPT) ? " +" : "", | |
199 | action->name); | |
200 | } | |
201 | seq_putc(p, '\n'); | |
202 | } | |
203 | out_unlock: | |
204 | spin_unlock_irqrestore(&irq_action_lock, flags); | |
205 | return 0; | |
206 | } | |
207 | ||
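For reference, on a uniprocessor build the routine above emits /proc/interrupts lines roughly of the following shape (counts and device names invented); the '+' flags an SA_INTERRUPT handler, and additional comma-separated names are actions sharing the line:

```
 11:      54321  + floppy
 13:       1234    esp, le
```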
208 | void free_irq(unsigned int irq, void *dev_id) | |
209 | { | |
210 | struct irqaction * action; | |
211 | struct irqaction * tmp = NULL; | |
212 | unsigned long flags; | |
213 | unsigned int cpu_irq; | |
214 | ||
215 | if (sparc_cpu_model == sun4d) { | |
216 | extern void sun4d_free_irq(unsigned int, void *); | |
217 | ||
218 | sun4d_free_irq(irq, dev_id); | |
219 | return; | |
220 | } | |
221 | cpu_irq = irq & (NR_IRQS - 1); | |
222 | if (cpu_irq > 14) { /* 14 irq levels on the sparc */ | |
223 | printk("Trying to free bogus IRQ %d\n", irq); | |
224 | return; | |
225 | } | |
226 | ||
227 | spin_lock_irqsave(&irq_action_lock, flags); | |
228 | ||
229 | action = *(cpu_irq + irq_action); | |
230 | ||
231 | if (!action->handler) { | |
232 | printk("Trying to free free IRQ%d\n",irq); | |
233 | goto out_unlock; | |
234 | } | |
235 | if (dev_id) { | |
236 | for (; action; action = action->next) { | |
237 | if (action->dev_id == dev_id) | |
238 | break; | |
239 | tmp = action; | |
240 | } | |
241 | if (!action) { | |
242 | printk("Trying to free free shared IRQ%d\n",irq); | |
243 | goto out_unlock; | |
244 | } | |
245 | } else if (action->flags & SA_SHIRQ) { | |
246 | printk("Trying to free shared IRQ%d with NULL device ID\n", irq); | |
247 | goto out_unlock; | |
248 | } | |
249 | if (action->flags & SA_STATIC_ALLOC) | |
250 | { | |
251 | /* This interrupt is marked as specially allocated | |
252 | * so it is a bad idea to free it. | |
253 | */ | |
254 | printk("Attempt to free statically allocated IRQ%d (%s)\n", | |
255 | irq, action->name); | |
256 | goto out_unlock; | |
257 | } | |
258 | ||
259 | if (action && tmp) | |
260 | tmp->next = action->next; | |
261 | else | |
262 | *(cpu_irq + irq_action) = action->next; | |
263 | ||
264 | spin_unlock_irqrestore(&irq_action_lock, flags); | |
265 | ||
266 | synchronize_irq(irq); | |
267 | ||
268 | spin_lock_irqsave(&irq_action_lock, flags); | |
269 | ||
270 | kfree(action); | |
271 | ||
272 | if (!(*(cpu_irq + irq_action))) | |
273 | disable_irq(irq); | |
274 | ||
275 | out_unlock: | |
276 | spin_unlock_irqrestore(&irq_action_lock, flags); | |
277 | } | |
278 | ||
279 | EXPORT_SYMBOL(free_irq); | |
280 | ||
281 | /* | |
282 | * This is called when we want to synchronize with | |
283 | * interrupts. We may for example tell a device to | |
284 | * stop sending interrupts: but to make sure there | |
285 | * are no interrupts that are executing on another | |
286 | * CPU we need to call this function. | |
287 | */ | |
288 | #ifdef CONFIG_SMP | |
289 | void synchronize_irq(unsigned int irq) | |
290 | { | |
291 | printk("synchronize_irq says: implement me!\n"); | |
292 | BUG(); | |
293 | } | |
294 | #endif /* SMP */ | |
295 | ||
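The comment above describes the standard teardown ordering a driver relies on; a hedged sketch of that caller-side pattern follows, with a hypothetical device structure and stop routine. Note that on this port the SMP implementation above is still a stub that BUGs if reached:

```c
/* Sketch: struct mydev, mydev_stop_hw() and the irq field are hypothetical. */
struct mydev {
	int irq;
	/* ... device state ... */
};

static void mydev_stop_hw(struct mydev *dev)
{
	/* ... tell the hardware to stop raising interrupts ... */
}

static void mydev_shutdown(struct mydev *dev)
{
	mydev_stop_hw(dev);		/* device no longer interrupts */
	synchronize_irq(dev->irq);	/* wait out handlers on other CPUs */
	free_irq(dev->irq, dev);	/* now the action can be removed safely */
}
```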
296 | void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs) | |
297 | { | |
298 | int i; | |
299 | struct irqaction * action; | |
300 | unsigned int cpu_irq; | |
301 | ||
302 | cpu_irq = irq & (NR_IRQS - 1); | |
303 | action = *(cpu_irq + irq_action); | |
304 | ||
305 | printk("IO device interrupt, irq = %d\n", irq); | |
306 | printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc, | |
307 | regs->npc, regs->u_regs[14]); | |
308 | if (action) { | |
309 | printk("Expecting: "); | |
310 | for (i = 0; i < 16; i++) | |
311 | if (action->handler) | |
312 | printk("[%s:%d:0x%x] ", action->name, | |
313 | (int) i, (unsigned int) action->handler); | |
314 | } | |
315 | printk("AIEEE\n"); | |
316 | panic("bogus interrupt received"); | |
317 | } | |
318 | ||
319 | void handler_irq(int irq, struct pt_regs * regs) | |
320 | { | |
321 | struct irqaction * action; | |
322 | int cpu = smp_processor_id(); | |
323 | #ifdef CONFIG_SMP | |
324 | extern void smp4m_irq_rotate(int cpu); | |
325 | #endif | |
326 | ||
327 | irq_enter(); | |
328 | disable_pil_irq(irq); | |
329 | #ifdef CONFIG_SMP | |
330 | /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */ | |
331 | if(irq < 10) | |
332 | smp4m_irq_rotate(cpu); | |
333 | #endif | |
334 | action = *(irq + irq_action); | |
335 | kstat_cpu(cpu).irqs[irq]++; | |
336 | do { | |
337 | if (!action || !action->handler) | |
338 | unexpected_irq(irq, NULL, regs); | |
339 | action->handler(irq, action->dev_id, regs); | |
340 | action = action->next; | |
341 | } while (action); | |
342 | enable_pil_irq(irq); | |
343 | irq_exit(); | |
344 | } | |
345 | ||
346 | #ifdef CONFIG_BLK_DEV_FD | |
347 | extern void floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs); | |
348 | ||
349 | void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs) | |
350 | { | |
351 | int cpu = smp_processor_id(); | |
352 | ||
353 | disable_pil_irq(irq); | |
354 | irq_enter(); | |
355 | kstat_cpu(cpu).irqs[irq]++; | |
356 | floppy_interrupt(irq, dev_id, regs); | |
357 | irq_exit(); | |
358 | enable_pil_irq(irq); | |
359 | // XXX Eek, it's totally changed with preempt_count() and such | |
360 | // if (softirq_pending(cpu)) | |
361 | // do_softirq(); | |
362 | } | |
363 | #endif | |
364 | ||
365 | /* Fast IRQ's on the Sparc can only have one routine attached to them, | |
366 | * thus no sharing possible. | |
367 | */ | |
368 | int request_fast_irq(unsigned int irq, | |
369 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | |
370 | unsigned long irqflags, const char *devname) | |
371 | { | |
372 | struct irqaction *action; | |
373 | unsigned long flags; | |
374 | unsigned int cpu_irq; | |
375 | int ret; | |
376 | #ifdef CONFIG_SMP | |
377 | struct tt_entry *trap_table; | |
378 | extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3; | |
379 | #endif | |
380 | ||
381 | cpu_irq = irq & (NR_IRQS - 1); | |
382 | if(cpu_irq > 14) { | |
383 | ret = -EINVAL; | |
384 | goto out; | |
385 | } | |
386 | if(!handler) { | |
387 | ret = -EINVAL; | |
388 | goto out; | |
389 | } | |
390 | ||
391 | spin_lock_irqsave(&irq_action_lock, flags); | |
392 | ||
393 | action = *(cpu_irq + irq_action); | |
394 | if(action) { | |
395 | if(action->flags & SA_SHIRQ) | |
396 | panic("Trying to register fast irq when already shared.\n"); | |
397 | if(irqflags & SA_SHIRQ) | |
398 | panic("Trying to register fast irq as shared.\n"); | |
399 | ||
400 | /* Anyway, someone already owns it, so it cannot be made fast. */ | |
401 | printk("request_fast_irq: Trying to register an IRQ that is already owned.\n"); | |
402 | ret = -EBUSY; | |
403 | goto out_unlock; | |
404 | } | |
405 | ||
406 | /* If this is flagged as statically allocated then we use our | |
407 | * private struct which is never freed. | |
408 | */ | |
409 | if (irqflags & SA_STATIC_ALLOC) { | |
410 | if (static_irq_count < MAX_STATIC_ALLOC) | |
411 | action = &static_irqaction[static_irq_count++]; | |
412 | else | |
413 | printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", | |
414 | irq, devname); | |
415 | } | |
416 | ||
417 | if (action == NULL) | |
418 | action = (struct irqaction *)kmalloc(sizeof(struct irqaction), | |
419 | GFP_ATOMIC); | |
420 | ||
421 | if (!action) { | |
422 | ret = -ENOMEM; | |
423 | goto out_unlock; | |
424 | } | |
425 | ||
426 | /* Dork with trap table if we get this far. */ | |
427 | #define INSTANTIATE(table) \ | |
428 | table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \ | |
429 | table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \ | |
430 | SPARC_BRANCH((unsigned long) handler, \ | |
431 | (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\ | |
432 | table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \ | |
433 | table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP; | |
434 | ||
435 | INSTANTIATE(sparc_ttable) | |
436 | #ifdef CONFIG_SMP | |
437 | trap_table = &trapbase_cpu1; INSTANTIATE(trap_table) | |
438 | trap_table = &trapbase_cpu2; INSTANTIATE(trap_table) | |
439 | trap_table = &trapbase_cpu3; INSTANTIATE(trap_table) | |
440 | #endif | |
441 | #undef INSTANTIATE | |
442 | /* | |
443 | * XXX The correct thing would be to flush only the I- and D-cache lines | |
444 | * which contain the handler in question. But as of this writing we | |
445 | * have no CPU-neutral interface to fine-grained flushes. | |
446 | */ | |
447 | flush_cache_all(); | |
448 | ||
449 | action->handler = handler; | |
450 | action->flags = irqflags; | |
451 | cpus_clear(action->mask); | |
452 | action->name = devname; | |
453 | action->dev_id = NULL; | |
454 | action->next = NULL; | |
455 | ||
456 | *(cpu_irq + irq_action) = action; | |
457 | ||
458 | enable_irq(irq); | |
459 | ||
460 | ret = 0; | |
461 | out_unlock: | |
462 | spin_unlock_irqrestore(&irq_action_lock, flags); | |
463 | out: | |
464 | return ret; | |
465 | } | |
466 | ||
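Because the trap table is patched to branch straight at the handler, real users of request_fast_irq() pass a hand-written assembly entry point rather than an ordinary C function; the sketch below shows only the registration call under that assumption, with hypothetical names:

```c
/* Sketch: fast_fd_entry is assumed to be an assembly entry point defined
 * elsewhere, and FAST_FD_IRQ a hypothetical IRQ number.  Sharing is
 * impossible here: SA_SHIRQ panics and a second owner gets -EBUSY.
 */
extern irqreturn_t fast_fd_entry(int irq, void *dev_id, struct pt_regs *regs);

static int __init claim_fast_fd_irq(void)
{
	return request_fast_irq(FAST_FD_IRQ, fast_fd_entry,
				SA_INTERRUPT, "floppy");
}
```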
467 | int request_irq(unsigned int irq, | |
468 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | |
469 | unsigned long irqflags, const char * devname, void *dev_id) | |
470 | { | |
471 | struct irqaction * action, *tmp = NULL; | |
472 | unsigned long flags; | |
473 | unsigned int cpu_irq; | |
474 | int ret; | |
475 | ||
476 | if (sparc_cpu_model == sun4d) { | |
477 | extern int sun4d_request_irq(unsigned int, | |
478 | irqreturn_t (*)(int, void *, struct pt_regs *), | |
479 | unsigned long, const char *, void *); | |
480 | return sun4d_request_irq(irq, handler, irqflags, devname, dev_id); | |
481 | } | |
482 | cpu_irq = irq & (NR_IRQS - 1); | |
483 | if(cpu_irq > 14) { | |
484 | ret = -EINVAL; | |
485 | goto out; | |
486 | } | |
487 | if (!handler) { | |
488 | ret = -EINVAL; | |
489 | goto out; | |
490 | } | |
491 | ||
492 | spin_lock_irqsave(&irq_action_lock, flags); | |
493 | ||
494 | action = *(cpu_irq + irq_action); | |
495 | if (action) { | |
496 | if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) { | |
497 | for (tmp = action; tmp->next; tmp = tmp->next); | |
498 | } else { | |
499 | ret = -EBUSY; | |
500 | goto out_unlock; | |
501 | } | |
502 | if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) { | |
503 | printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq); | |
504 | ret = -EBUSY; | |
505 | goto out_unlock; | |
506 | } | |
507 | action = NULL; /* Or else! */ | |
508 | } | |
509 | ||
510 | /* If this is flagged as statically allocated then we use our | |
511 | * private struct which is never freed. | |
512 | */ | |
513 | if (irqflags & SA_STATIC_ALLOC) { | |
514 | if (static_irq_count < MAX_STATIC_ALLOC) | |
515 | action = &static_irqaction[static_irq_count++]; | |
516 | else | |
517 | printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname); | |
518 | } | |
519 | ||
520 | if (action == NULL) | |
521 | action = (struct irqaction *)kmalloc(sizeof(struct irqaction), | |
522 | GFP_ATOMIC); | |
523 | ||
524 | if (!action) { | |
525 | ret = -ENOMEM; | |
526 | goto out_unlock; | |
527 | } | |
528 | ||
529 | action->handler = handler; | |
530 | action->flags = irqflags; | |
531 | cpus_clear(action->mask); | |
532 | action->name = devname; | |
533 | action->next = NULL; | |
534 | action->dev_id = dev_id; | |
535 | ||
536 | if (tmp) | |
537 | tmp->next = action; | |
538 | else | |
539 | *(cpu_irq + irq_action) = action; | |
540 | ||
541 | enable_irq(irq); | |
542 | ||
543 | ret = 0; | |
544 | out_unlock: | |
545 | spin_unlock_irqrestore(&irq_action_lock, flags); | |
546 | out: | |
547 | return ret; | |
548 | } | |
549 | ||
550 | EXPORT_SYMBOL(request_irq); | |
551 | ||
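A hedged caller-side sketch of the ordinary, shareable path exported above; the device structure, handler, status check and IRQ number are hypothetical:

```c
/* Sketch: struct mydev, mydev_pending() and MYDEV_IRQ are hypothetical. */
struct mydev {
	int irq;
	/* ... */
};

static int mydev_pending(struct mydev *dev)
{
	return 1;	/* stand-in for reading the device's status register */
}

static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = dev_id;

	if (!mydev_pending(dev))
		return IRQ_NONE;	/* shared line: interrupt was not ours */
	/* ... service the device ... */
	return IRQ_HANDLED;
}

int mydev_attach(struct mydev *dev)
{
	/* SA_SHIRQ lets this action chain onto other SA_SHIRQ owners. */
	return request_irq(MYDEV_IRQ, mydev_interrupt, SA_SHIRQ, "mydev", dev);
}

void mydev_detach(struct mydev *dev)
{
	free_irq(MYDEV_IRQ, dev);	/* dev_id picks our action off the chain */
}
```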
552 | /* We really don't need these at all on the Sparc. We only have | |
553 | * stubs here because they are exported to modules. | |
554 | */ | |
555 | unsigned long probe_irq_on(void) | |
556 | { | |
557 | return 0; | |
558 | } | |
559 | ||
560 | EXPORT_SYMBOL(probe_irq_on); | |
561 | ||
562 | int probe_irq_off(unsigned long mask) | |
563 | { | |
564 | return 0; | |
565 | } | |
566 | ||
567 | EXPORT_SYMBOL(probe_irq_off); | |
568 | ||
569 | /* djhr | |
570 | * This could probably be made indirect too and assigned in the CPU | |
571 | * bits of the code. That would be much nicer I think and would also | |
572 | * fit in with the idea of being able to tune your kernel for your machine | |
573 | * by removing unneeded machine and device support. | |
574 | * | |
575 | */ | |
576 | ||
577 | void __init init_IRQ(void) | |
578 | { | |
579 | extern void sun4c_init_IRQ( void ); | |
580 | extern void sun4m_init_IRQ( void ); | |
581 | extern void sun4d_init_IRQ( void ); | |
582 | ||
583 | switch(sparc_cpu_model) { | |
584 | case sun4c: | |
585 | case sun4: | |
586 | sun4c_init_IRQ(); | |
587 | break; | |
588 | ||
589 | case sun4m: | |
590 | #ifdef CONFIG_PCI | |
591 | pcic_probe(); | |
592 | if (pcic_present()) { | |
593 | sun4m_pci_init_IRQ(); | |
594 | break; | |
595 | } | |
596 | #endif | |
597 | sun4m_init_IRQ(); | |
598 | break; | |
599 | ||
600 | case sun4d: | |
601 | sun4d_init_IRQ(); | |
602 | break; | |
603 | ||
604 | default: | |
605 | prom_printf("Cannot initialize IRQ's on this Sun machine..."); | |
606 | break; | |
607 | } | |
608 | btfixup(); | |
609 | } | |
610 | ||
611 | void init_irq_proc(void) | |
612 | { | |
613 | /* For now, nothing... */ | |
614 | } |