Commit | Line | Data |
---|---|---|
f9bd170a PM |
1 | /* |
2 | * i8259 interrupt controller driver. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation; either version | |
7 | * 2 of the License, or (at your option) any later version. | |
8 | */ | |
0ebfff14 BH |
9 | #undef DEBUG |
10 | ||
1da177e4 LT |
11 | #include <linux/init.h> |
12 | #include <linux/ioport.h> | |
13 | #include <linux/interrupt.h> | |
0ebfff14 BH |
14 | #include <linux/kernel.h> |
15 | #include <linux/delay.h> | |
1da177e4 LT |
16 | #include <asm/io.h> |
17 | #include <asm/i8259.h> | |
0ebfff14 | 18 | #include <asm/prom.h> |
1da177e4 | 19 | |
static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */

/* Cached copies of the two 8259 mask (OCW1) registers; start fully masked */
static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])	/* slave mask register (port 0xA1) */
#define cached_21 (cached_8259[1])	/* master mask register (port 0x21) */

/* Serializes access to the cached masks and the PIC command/data ports */
static DEFINE_SPINLOCK(i8259_lock);

/* The irq_host mapping 8259 hw irqs (0-15) to Linux virqs; set by i8259_init() */
static struct irq_host *i8259_host;
1da177e4 LT |
29 | |
/*
 * Acknowledge the IRQ using either the PCI host bridge's interrupt
 * acknowledge feature or poll. How i8259_init() is called determines
 * which is called. It should be noted that polling is broken on some
 * IBM and Motorola PReP boxes so we must use the int-ack feature on them.
 *
 * Returns the active irq number, or NO_IRQ if the interrupt was
 * spurious / nothing was pending.
 */
unsigned int i8259_irq(void)
{
	int irq;
	int lock = 0;	/* set iff we took i8259_lock (poll path only) */

	/* Either int-ack or poll for the IRQ */
	if (pci_intack)
		irq = readb(pci_intack);
	else {
		/* Poll mode drives the command ports, so serialize */
		spin_lock(&i8259_lock);
		lock = 1;

		/* Perform an interrupt acknowledge cycle on controller 1. */
		outb(0x0C, 0x20);		/* prepare for poll */
		irq = inb(0x20) & 7;
		if (irq == 2 ) {
			/*
			 * Interrupt is cascaded so perform interrupt
			 * acknowledge on controller 2.
			 */
			outb(0x0C, 0xA0);	/* prepare for poll */
			irq = (inb(0xA0) & 7) + 8;
		}
	}

	if (irq == 7) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR). If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
		if (!pci_intack)
			outb(0x0B, 0x20);	/* select ISR register */
		if(~inb(0x20) & 0x80)
			irq = NO_IRQ;
	} else if (irq == 0xff)
		/* int-ack cycle reported nothing pending */
		irq = NO_IRQ;

	if (lock)
		spin_unlock(&i8259_lock);
	return irq;
}
80 | ||
1da177e4 LT |
/*
 * Mask the source and acknowledge it: set its bit in the cached OCW1
 * mask, write the mask back, then issue non-specific EOI(s). A slave
 * irq needs an EOI on both the slave and the master (cascade).
 */
static void i8259_mask_and_ack_irq(unsigned int irq_nr)
{
	unsigned long flags;

	spin_lock_irqsave(&i8259_lock, flags);
	if (irq_nr > 7) {
		/* Source is on the slave (irqs 8-15) */
		cached_A1 |= 1 << (irq_nr-8);
		inb(0xA1);	/* DUMMY */
		outb(cached_A1, 0xA1);
		outb(0x20, 0xA0);	/* Non-specific EOI */
		outb(0x20, 0x20);	/* Non-specific EOI to cascade */
	} else {
		/* Source is on the master (irqs 0-7) */
		cached_21 |= 1 << irq_nr;
		inb(0x21);	/* DUMMY */
		outb(cached_21, 0x21);
		outb(0x20, 0x20);	/* Non-specific EOI */
	}
	spin_unlock_irqrestore(&i8259_lock, flags);
}
100 | ||
/*
 * Write both cached OCW1 masks back to the hardware. @irq_nr is unused:
 * both registers are always rewritten. Both callers hold i8259_lock.
 */
static void i8259_set_irq_mask(int irq_nr)
{
	outb(cached_A1,0xA1);
	outb(cached_21,0x21);
}
106 | ||
107 | static void i8259_mask_irq(unsigned int irq_nr) | |
108 | { | |
109 | unsigned long flags; | |
110 | ||
0ebfff14 BH |
111 | pr_debug("i8259_mask_irq(%d)\n", irq_nr); |
112 | ||
1da177e4 | 113 | spin_lock_irqsave(&i8259_lock, flags); |
f9bd170a | 114 | if (irq_nr < 8) |
1da177e4 LT |
115 | cached_21 |= 1 << irq_nr; |
116 | else | |
117 | cached_A1 |= 1 << (irq_nr-8); | |
118 | i8259_set_irq_mask(irq_nr); | |
119 | spin_unlock_irqrestore(&i8259_lock, flags); | |
120 | } | |
121 | ||
122 | static void i8259_unmask_irq(unsigned int irq_nr) | |
123 | { | |
124 | unsigned long flags; | |
125 | ||
0ebfff14 BH |
126 | pr_debug("i8259_unmask_irq(%d)\n", irq_nr); |
127 | ||
1da177e4 | 128 | spin_lock_irqsave(&i8259_lock, flags); |
f9bd170a | 129 | if (irq_nr < 8) |
1da177e4 LT |
130 | cached_21 &= ~(1 << irq_nr); |
131 | else | |
132 | cached_A1 &= ~(1 << (irq_nr-8)); | |
133 | i8259_set_irq_mask(irq_nr); | |
134 | spin_unlock_irqrestore(&i8259_lock, flags); | |
135 | } | |
136 | ||
b9e5b4e6 | 137 | static struct irq_chip i8259_pic = { |
b27df672 | 138 | .name = " i8259 ", |
b9e5b4e6 | 139 | .mask = i8259_mask_irq, |
3a800ff5 | 140 | .disable = i8259_mask_irq, |
b9e5b4e6 BH |
141 | .unmask = i8259_unmask_irq, |
142 | .mask_ack = i8259_mask_and_ack_irq, | |
1da177e4 LT |
143 | }; |
144 | ||
/* Legacy ISA port ranges owned by the PICs, registered in i8259_init() */
static struct resource pic1_iores = {
	.name = "8259 (master)",
	.start = 0x20,
	.end = 0x21,
	.flags = IORESOURCE_BUSY,
};

static struct resource pic2_iores = {
	.name = "8259 (slave)",
	.start = 0xa0,
	.end = 0xa1,
	.flags = IORESOURCE_BUSY,
};

/* ELCR (edge/level control) registers on PCI/ISA systems */
static struct resource pic_edgectrl_iores = {
	.name = "8259 edge control",
	.start = 0x4d0,
	.end = 0x4d1,
	.flags = IORESOURCE_BUSY,
};
165 | ||
0ebfff14 BH |
166 | static int i8259_host_match(struct irq_host *h, struct device_node *node) |
167 | { | |
52964f87 | 168 | return h->of_node == NULL || h->of_node == node; |
0ebfff14 BH |
169 | } |
170 | ||
/*
 * Set up a newly mapped virq: install the 8259 chip and the level-flow
 * handler, and refuse requests for the internal cascade (hw irq 2).
 */
static int i8259_host_map(struct irq_host *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);

	/* We block the internal cascade */
	if (hw == 2)
		irq_to_desc(virq)->status |= IRQ_NOREQUEST;

	/* We use the level handler only for now, we might want to
	 * be more cautious here but that works for now
	 */
	irq_to_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
	return 0;
}
187 | ||
/*
 * Tear down a mapping: quiesce the source in hardware before removing
 * the chip/handler, then wait for any in-flight handler to finish.
 *
 * NOTE(review): passes virq to i8259_mask_irq(), which indexes by hw irq
 * number — relies on the legacy host's 1:1 virq==hwirq mapping; confirm.
 */
static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
{
	/* Make sure irq is masked in hardware */
	i8259_mask_irq(virq);

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);
}
199 | ||
200 | static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, | |
201 | u32 *intspec, unsigned int intsize, | |
202 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | |
203 | { | |
204 | static unsigned char map_isa_senses[4] = { | |
205 | IRQ_TYPE_LEVEL_LOW, | |
206 | IRQ_TYPE_LEVEL_HIGH, | |
207 | IRQ_TYPE_EDGE_FALLING, | |
208 | IRQ_TYPE_EDGE_RISING, | |
209 | }; | |
210 | ||
211 | *out_hwirq = intspec[0]; | |
212 | if (intsize > 1 && intspec[1] < 4) | |
213 | *out_flags = map_isa_senses[intspec[1]]; | |
214 | else | |
215 | *out_flags = IRQ_TYPE_NONE; | |
216 | ||
217 | return 0; | |
218 | } | |
219 | ||
/* irq_host callbacks for the legacy 8259 interrupt domain */
static struct irq_host_ops i8259_host_ops = {
	.match = i8259_host_match,
	.map = i8259_host_map,
	.unmap = i8259_host_unmap,
	.xlate = i8259_host_xlate,
};
226 | ||
f4d4c354 BH |
/* Accessor for the 8259 irq_host; NULL until i8259_init() has succeeded */
struct irq_host *i8259_get_host(void)
{
	return i8259_host;
}
231 | ||
/**
 * i8259_init - Initialize the legacy controller
 * @node: device node of the legacy PIC (can be NULL, but then, it will match
 * all interrupts, so beware)
 * @intack_addr: PCI interrupt acknowledge (real) address which will return
 * the active irq from the 8259; 0 selects poll mode in i8259_irq()
 *
 * Runs the ICW1-ICW4 initialization sequence on both controllers (the
 * write order below is mandated by the 8259 programming model), creates
 * the legacy irq host and claims the ISA port resources.
 */
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
	unsigned long flags;

	/* initialize the controller */
	spin_lock_irqsave(&i8259_lock, flags);

	/* Mask all first */
	outb(0xff, 0xA1);
	outb(0xff, 0x21);

	/* init master interrupt controller */
	outb(0x11, 0x20); /* ICW1: start init, edge triggered, expect ICW4 */
	outb(0x00, 0x21); /* ICW2: vector base 0 */
	outb(0x04, 0x21); /* ICW3: slave cascaded on IR2 */
	outb(0x01, 0x21); /* ICW4: select 8086 mode */

	/* init slave interrupt controller */
	outb(0x11, 0xA0); /* ICW1: start init, edge triggered, expect ICW4 */
	outb(0x08, 0xA1); /* ICW2: vector base 8 */
	outb(0x02, 0xA1); /* ICW3: slave ID 2 (cascaded on master IR2) */
	outb(0x01, 0xA1); /* ICW4: select 8086 mode */

	/* That thing is slow */
	udelay(100);

	/* always read ISR on subsequent status reads */
	outb(0x0B, 0x20);
	outb(0x0B, 0xA0);

	/* Unmask the internal cascade */
	cached_21 &= ~(1 << 2);

	/* Set interrupt masks */
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);

	spin_unlock_irqrestore(&i8259_lock, flags);

	/* create a legacy host mapping hw irqs 0-15 one-to-one */
	i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY,
				    0, &i8259_host_ops, 0);
	if (i8259_host == NULL) {
		printk(KERN_ERR "i8259: failed to allocate irq host !\n");
		return;
	}

	/* reserve our resources */
	/* XXX should we continue doing that ? it seems to cause problems
	 * with further requesting of PCI IO resources for that range...
	 * need to look into it.
	 */
	request_resource(&ioport_resource, &pic1_iores);
	request_resource(&ioport_resource, &pic2_iores);
	request_resource(&ioport_resource, &pic_edgectrl_iores);

	/* Map the PCI int-ack register so i8259_irq() can use it */
	if (intack_addr != 0)
		pci_intack = ioremap(intack_addr, 1);

	printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}