/*
 * Interrupt request handling routines.  On the
 * Sparc the IRQs are basically 'cast in stone'
 * and you are supposed to probe the PROM's device
 * node tree to find out who's got which IRQ.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
 */
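
/*
 * What "probe the PROM" means in practice -- a minimal sketch, assuming
 * a device 'node' handle already obtained by the bus probe code (the
 * "intr" property layout is struct linux_prom_irqs from openprom.h):
 *
 *	struct linux_prom_irqs irqs[4];
 *	int len;
 *
 *	len = prom_getproperty(node, "intr", (char *)irqs, sizeof(irqs));
 *	if (len > 0)
 *		dev_irq = irqs[0].pri;	// the interrupt priority level
 */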

#include <linux/kernel_stat.h>
#include <linux/seq_file.h>

#include <asm/pcic.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

#ifdef CONFIG_SMP
#define SMP_NOP2 "nop; nop;\n\t"
#define SMP_NOP3 "nop; nop; nop;\n\t"
#else
#define SMP_NOP2
#define SMP_NOP3
#endif /* CONFIG_SMP */
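
/*
 * The extra nops pad the window around %psr accesses on Cypress-based
 * sun4m SMP machines (the "Sun4m + Cypress + SMP bug" noted at each
 * use site below); on UP kernels they compile away to nothing.
 */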

unsigned long arch_local_irq_save(void)
{
	unsigned long retval;
	unsigned long tmp;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
		"or %0, %2, %1\n\t"
		"wr %1, 0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=&r" (retval), "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");

	return retval;
}
EXPORT_SYMBOL(arch_local_irq_save);

void arch_local_irq_enable(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
		"andn %0, %1, %0\n\t"
		"wr %0, 0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=&r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}
EXPORT_SYMBOL(arch_local_irq_enable);

void arch_local_irq_restore(unsigned long old_psr)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"and %2, %1, %2\n\t"
		SMP_NOP2	/* Sun4m + Cypress + SMP bug */
		"andn %0, %1, %0\n\t"
		"wr %0, %2, %%psr\n\t"
		"nop; nop; nop\n"
		: "=&r" (tmp)
		: "i" (PSR_PIL), "r" (old_psr)
		: "memory");
}
EXPORT_SYMBOL(arch_local_irq_restore);
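
/*
 * Typical use of the primitives above -- a sketch only; real callers
 * normally go through the generic local_irq_save()/local_irq_restore()
 * wrappers:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	// PIL raised, old %psr in flags
 *	// ... touch state that an interrupt handler also touches ...
 *	arch_local_irq_restore(flags);	// put the old PIL field back
 */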

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * IRQ numbers.. These are no longer restricted to 15..
 *
 * This is done so that SBUS cards and onboard IO can be masked
 * correctly; using the interrupt level alone isn't good enough.
 *
 * For example:
 *   A device interrupting at SBUS level 6 and the floppy both come in
 *   at IRQ11, but enabling and disabling them requires writing to
 *   different bits in the SLAVIO/SEC.
 *
 * As a result of these changes sun4m machines can now support
 * directed CPU interrupts using the existing enable/disable irq code
 * with tweaks.
 */
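
/*
 * An illustrative sketch of the point above (names and numbers are
 * hypothetical): both devices trap at processor level 11, yet each
 * can be masked independently through its own IRQ number:
 *
 *	request_irq(FLOPPY_IRQ, floppy_interrupt, 0, "floppy", NULL);
 *	request_irq(FOO_SBUS_IRQ, foo_interrupt, 0, "foo", foo_dev);
 *	disable_irq(FOO_SBUS_IRQ);	// the floppy keeps interrupting
 */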

static void irq_panic(void)
{
	prom_printf("machine: %s doesn't have irq handlers defined!\n",
		    &cputypval[0]);
	prom_halt();
}

void (*sparc_init_timers)(irq_handler_t) = (void (*)(irq_handler_t))irq_panic;

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * There used to be extern calls and hard-coded values here.. very sucky!
 * Instead, because some of the devices attach very early, I do something
 * equally sucky but at least we'll never try to free statically allocated
 * space or call kmalloc before kmalloc_init :(.
 *
 * In fact it's the timer10 that attaches first.. then timer14,
 * then kmalloc_init is called.. then the tty interrupts attach.
 * hmmm....
 *
 */
#define MAX_STATIC_ALLOC 4
struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;

static struct {
	struct irqaction *action;
	int flags;
} sparc_irq[NR_IRQS];
#define SPARC_IRQ_INPROGRESS 1
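/*
 * SPARC_IRQ_INPROGRESS is set in .flags while handler_irq() runs the
 * action chain for that level; synchronize_irq() below spins on it.
 */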

/* Used to protect the IRQ action lists */
DEFINE_SPINLOCK(irq_action_lock);

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v;
	struct irqaction *action;
	unsigned long flags;
#ifdef CONFIG_SMP
	int j;
#endif

	if (sparc_cpu_model == sun4d)
		return show_sun4d_interrupts(p, v);

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
		action = sparc_irq[i].action;
		if (!action)
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %c %s",
			   (action->flags & IRQF_DISABLED) ? '+' : ' ',
			   action->name);
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ",%s %s",
				   (action->flags & IRQF_DISABLED) ? " +" : "",
				   action->name);
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return 0;
}
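
/*
 * Illustrative /proc/interrupts output from the above (counts made up;
 * the '+' marks an IRQF_DISABLED handler):
 *
 *	 11:      54321   floppy, serial
 *	 13:        789  + esp
 */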

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction **actionp;
	unsigned long flags;
	unsigned int cpu_irq;

	if (sparc_cpu_model == sun4d) {
		sun4d_free_irq(irq, dev_id);
		return;
	}
	cpu_irq = irq & (NR_IRQS - 1);
	if (cpu_irq > 14) {	/* 14 irq levels on the sparc */
		printk(KERN_ERR "Trying to free bogus IRQ %d\n", irq);
		return;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	actionp = &sparc_irq[cpu_irq].action;
	action = *actionp;

	if (!action || !action->handler) {
		printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
		goto out_unlock;
	}
	if (dev_id) {
		for (; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			actionp = &action->next;
		}
		if (!action) {
			printk(KERN_ERR "Trying to free free shared IRQ%d\n",
			       irq);
			goto out_unlock;
		}
	} else if (action->flags & IRQF_SHARED) {
		printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
		       irq);
		goto out_unlock;
	}
	if (action->flags & SA_STATIC_ALLOC) {
		/*
		 * This interrupt is marked as specially allocated
		 * so it is a bad idea to free it.
		 */
		printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
		       irq, action->name);
		goto out_unlock;
	}

	*actionp = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	kfree(action);

	if (!sparc_irq[cpu_irq].action)
		__disable_irq(irq);

out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
EXPORT_SYMBOL(free_irq);

/*
 * This is called when we want to synchronize with
 * interrupts.  We may, for example, tell a device to
 * stop sending interrupts, but to make sure there
 * are no handlers still executing on another
 * CPU we need to call this function.
 */
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	unsigned int cpu_irq;

	cpu_irq = irq & (NR_IRQS - 1);
	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
		cpu_relax();
}
EXPORT_SYMBOL(synchronize_irq);
#endif /* CONFIG_SMP */
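
/*
 * The pattern the comment above describes, sketched for a hypothetical
 * device 'foo' (note that free_irq() above already does the synchronize
 * itself after unlinking the action):
 *
 *	foo_mask_interrupts(foo);	// device stops raising the line
 *	synchronize_irq(foo->irq);	// wait out a handler in flight
 *	free_irq(foo->irq, foo);
 */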

void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	int i;
	struct irqaction *action;
	unsigned int cpu_irq;

	cpu_irq = irq & (NR_IRQS - 1);
	action = sparc_irq[cpu_irq].action;

	printk(KERN_ERR "IO device interrupt, irq = %d\n", irq);
	printk(KERN_ERR "PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
	       regs->npc, regs->u_regs[14]);
	if (action) {
		printk(KERN_ERR "Expecting: ");
		/* Walk the whole chain, not the head entry 16 times. */
		for (i = 0; action; action = action->next, i++)
			printk(KERN_CONT "[%s:%d:0x%x] ", action->name,
			       i, (unsigned int)action->handler);
	}
	printk(KERN_ERR "AIEEE\n");
	panic("bogus interrupt received");
}

void handler_irq(int pil, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	struct irqaction *action;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);
	irq_enter();
	disable_pil_irq(pil);
#ifdef CONFIG_SMP
	/* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
	if (sparc_cpu_model == sun4m && pil < 10)
		smp4m_irq_rotate(cpu);
#endif
	action = sparc_irq[pil].action;
	sparc_irq[pil].flags |= SPARC_IRQ_INPROGRESS;
	kstat_cpu(cpu).irqs[pil]++;
	do {
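		/* unexpected_irq() panics, so it never returns here */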
		if (!action || !action->handler)
			unexpected_irq(pil, NULL, regs);
		action->handler(pil, action->dev_id);
		action = action->next;
	} while (action);
	sparc_irq[pil].flags &= ~SPARC_IRQ_INPROGRESS;
	enable_pil_irq(pil);
	irq_exit();
	set_irq_regs(old_regs);
}

#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)

/*
 * Fast IRQs on the Sparc can only have one routine attached to them,
 * thus no sharing is possible.
 */
static int request_fast_irq(unsigned int irq,
			    void (*handler)(void),
			    unsigned long irqflags, const char *devname)
{
	struct irqaction *action;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;
#if defined(CONFIG_SMP) && !defined(CONFIG_SPARC_LEON)
	struct tt_entry *trap_table;
#endif
	cpu_irq = irq & (NR_IRQS - 1);
	if (cpu_irq > 14) {
		ret = -EINVAL;
		goto out;
	}
	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = sparc_irq[cpu_irq].action;
	if (action) {
		if (action->flags & IRQF_SHARED)
			panic("Trying to register fast irq when already shared.\n");
		if (irqflags & IRQF_SHARED)
			panic("Trying to register fast irq as shared.\n");

		/* Anyway, someone already owns it so it cannot be made fast. */
		printk(KERN_ERR "request_fast_irq: IRQ%d already owned, cannot register.\n",
		       irq);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk(KERN_ERR "Fast IRQ%d (%s) SA_STATIC_ALLOC failed, using kmalloc\n",
			       irq, devname);
	}

	if (action == NULL)
		action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Dork with trap table if we get this far. */
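	/*
	 * Each trap table slot is four instructions wide.  The macro
	 * below rewrites this level's slot to: rd %psr into %l0, branch
	 * straight to 'handler' (with the rd %wim into %l3 executing in
	 * the branch delay slot), and a trailing nop -- so the trap
	 * vectors directly to the fast handler, bypassing the common
	 * interrupt entry path.
	 */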
#define INSTANTIATE(table) \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
		SPARC_BRANCH((unsigned long) handler, \
			     (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two); \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;

	INSTANTIATE(sparc_ttable)
#if defined(CONFIG_SMP) && !defined(CONFIG_SPARC_LEON)
	trap_table = &trapbase_cpu1;
	INSTANTIATE(trap_table)
	trap_table = &trapbase_cpu2;
	INSTANTIATE(trap_table)
	trap_table = &trapbase_cpu3;
	INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
	/*
	 * XXX The correct thing would be to flush only the I- and D-cache
	 * lines which contain the handler in question.  But as of this
	 * writing we have no CPU-neutral interface for fine-grained flushes.
	 */
	flush_cache_all();

	action->flags = irqflags;
	action->name = devname;
	action->dev_id = NULL;
	action->next = NULL;
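	/*
	 * Note that action->handler is left unset: fast IRQs are
	 * dispatched from the patched trap table slot above and never
	 * go through the irqaction chain.
	 */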

	sparc_irq[cpu_irq].action = action;

	__enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}

/*
 * These variables are used to access state from the assembler
 * interrupt handler, floppy_hardint, so we cannot put these in
 * the floppy driver image because that would not work in the
 * modular case.
 */
volatile unsigned char *fdc_status;
EXPORT_SYMBOL(fdc_status);

char *pdma_vaddr;
EXPORT_SYMBOL(pdma_vaddr);

unsigned long pdma_size;
EXPORT_SYMBOL(pdma_size);

volatile int doing_pdma;
EXPORT_SYMBOL(doing_pdma);

char *pdma_base;
EXPORT_SYMBOL(pdma_base);

unsigned long pdma_areasize;
EXPORT_SYMBOL(pdma_areasize);

static irq_handler_t floppy_irq_handler;

void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);
	disable_pil_irq(irq);
	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	floppy_irq_handler(irq, dev_id);
	irq_exit();
	enable_pil_irq(irq);
	set_irq_regs(old_regs);
	/*
	 * XXX Eek, it's totally changed with preempt_count() and such
	 * if (softirq_pending(cpu))
	 *	do_softirq();
	 */
}

int sparc_floppy_request_irq(int irq, unsigned long flags,
			     irq_handler_t irq_handler)
{
	floppy_irq_handler = irq_handler;
	return request_fast_irq(irq, floppy_hardint, flags, "floppy");
}
EXPORT_SYMBOL(sparc_floppy_request_irq);

#endif

int request_irq(unsigned int irq,
		irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action, **actionp;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;

	if (sparc_cpu_model == sun4d)
		return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);

	cpu_irq = irq & (NR_IRQS - 1);
	if (cpu_irq > 14) {
		ret = -EINVAL;
		goto out;
	}
	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	actionp = &sparc_irq[cpu_irq].action;
	action = *actionp;
	if (action) {
		if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
			printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
			       irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		for ( ; action; action = *actionp)
			actionp = &action->next;
	}

	/*
	 * If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed, using kmalloc\n",
			       irq, devname);
	}
	if (action == NULL)
		action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	*actionp = action;

	__enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(request_irq);
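
/*
 * Typical request_irq() usage -- a sketch with hypothetical names:
 *
 *	err = request_irq(foo->irq, foo_interrupt, IRQF_SHARED,
 *			  "foo", foo);
 *	if (err)
 *		return err;
 *	...
 *	free_irq(foo->irq, foo);
 */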

void disable_irq_nosync(unsigned int irq)
{
	__disable_irq(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

void disable_irq(unsigned int irq)
{
	__disable_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void enable_irq(unsigned int irq)
{
	__enable_irq(irq);
}
EXPORT_SYMBOL(enable_irq);

/*
 * We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}
EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}
EXPORT_SYMBOL(probe_irq_off);

/* djhr
 * This could probably be made indirect too and assigned in the CPU
 * bits of the code.  That would be much nicer I think and would also
 * fit in with the idea of being able to tune your kernel for your
 * machine by removing unneeded machine and device support.
 *
 */
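
/*
 * A sketch of that indirection (hypothetical; this is not how the
 * code is wired today):
 *
 *	static void (*sparc_init_irq_hook)(void);
 *
 * Each cpu-model setup would assign sparc_init_irq_hook once, and
 * init_IRQ() below would reduce to sparc_init_irq_hook(); btfixup();.
 */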

void __init init_IRQ(void)
{
	switch (sparc_cpu_model) {
	case sun4c:
	case sun4:
		sun4c_init_IRQ();
		break;

	case sun4m:
#ifdef CONFIG_PCI
		pcic_probe();
		if (pcic_present()) {
			sun4m_pci_init_IRQ();
			break;
		}
#endif
		sun4m_init_IRQ();
		break;

	case sun4d:
		sun4d_init_IRQ();
		break;

	case sparc_leon:
		leon_init_IRQ();
		break;

	default:
		prom_printf("Cannot initialize IRQs on this Sun machine...");
		break;
	}
	btfixup();
}

#ifdef CONFIG_PROC_FS
void init_irq_proc(void)
{
	/* For now, nothing... */
}
#endif /* CONFIG_PROC_FS */