arch/x86/kvm/i8259.c
/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>

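/*
 * pic_lock/pic_unlock guard the PIC state.  Work that should not run with
 * the spinlock held (notifying irq ack listeners and kicking the vcpu) is
 * recorded in pending_acks/wakeup_needed while locked and carried out by
 * pic_unlock() once the lock has been dropped.
 */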
static void pic_lock(struct kvm_pic *s)
{
        spin_lock(&s->lock);
}

static void pic_unlock(struct kvm_pic *s)
{
        struct kvm *kvm = s->kvm;
        unsigned acks = s->pending_acks;
        bool wakeup = s->wakeup_needed;
        struct kvm_vcpu *vcpu;

        s->pending_acks = 0;
        s->wakeup_needed = false;

        spin_unlock(&s->lock);

        while (acks) {
                kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)),
                                     __ffs(acks));
                acks &= acks - 1;
        }

        if (wakeup) {
                vcpu = s->kvm->vcpus[0];
                if (vcpu)
                        kvm_vcpu_kick(vcpu);
        }
}

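/*
 * Clear the in-service bit for 'irq' and record the ack; the isr_ack bits
 * are checked by pic_irq_request() so the vcpu is only woken once for an
 * interrupt line that has not been acknowledged yet.
 */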
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
        s->isr &= ~(1 << irq);
        s->isr_ack |= (1 << irq);
}

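/*
 * Mark every line on both PICs as acknowledged, so that the next assertion
 * of any IRQ will again wake the vcpu in pic_irq_request().
 */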
void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
        struct kvm_pic *s = pic_irqchip(kvm);
        s->pics[0].isr_ack = 0xff;
        s->pics[1].isr_ack = 0xff;
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 */
static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
        int mask;
        mask = 1 << irq;
        if (s->elcr & mask)     /* level triggered */
                if (level) {
                        s->irr |= mask;
                        s->last_irr |= mask;
                } else {
                        s->irr &= ~mask;
                        s->last_irr &= ~mask;
                }
        else    /* edge triggered */
                if (level) {
                        if ((s->last_irr & mask) == 0)
                                s->irr |= mask;
                        s->last_irr |= mask;
                } else
                        s->last_irr &= ~mask;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
        int priority;
        if (mask == 0)
                return 8;
        priority = 0;
        while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
                priority++;
        return priority;
}

/*
 * return the pic wanted interrupt. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
        int mask, cur_priority, priority;

        mask = s->irr & ~s->imr;
        priority = get_priority(s, mask);
        if (priority == 8)
                return -1;
        /*
         * compute current priority. If special fully nested mode on the
         * master, the IRQ coming from the slave is not taken into account
         * for the priority computation.
         */
        mask = s->isr;
        if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
                mask &= ~(1 << 2);
        cur_priority = get_priority(s, mask);
        if (priority < cur_priority)
                /*
                 * higher priority found: an irq should be generated
                 */
                return (priority + s->priority_add) & 7;
        else
                return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
        int irq2, irq;

        irq2 = pic_get_irq(&s->pics[1]);
        if (irq2 >= 0) {
                /*
                 * if irq request by slave pic, signal master PIC
                 */
                pic_set_irq1(&s->pics[0], 2, 1);
                pic_set_irq1(&s->pics[0], 2, 0);
        }
        irq = pic_get_irq(&s->pics[0]);
        if (irq >= 0)
                s->irq_request(s->irq_request_opaque, 1);
        else
                s->irq_request(s->irq_request_opaque, 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
        pic_lock(s);
        pic_update_irq(s);
        pic_unlock(s);
}

void kvm_pic_set_irq(void *opaque, int irq, int level)
{
        struct kvm_pic *s = opaque;

        pic_lock(s);
        if (irq >= 0 && irq < PIC_NUM_PINS) {
                pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
                pic_update_irq(s);
        }
        pic_unlock(s);
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
        s->isr |= 1 << irq;
        if (s->auto_eoi) {
                if (s->rotate_on_auto_eoi)
                        s->priority_add = (irq + 1) & 7;
                pic_clear_isr(s, irq);
        }
        /*
         * We don't clear a level sensitive interrupt here
         */
        if (!(s->elcr & (1 << irq)))
                s->irr &= ~(1 << irq);
}

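/*
 * Called when the vcpu accepts an interrupt from the PIC: acknowledge the
 * highest-priority pending IRQ (following the cascade through IRQ2 to the
 * slave), notify ack listeners and return the vector number to inject.
 * A spurious IRQ7 vector is returned if nothing is actually pending.
 */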
int kvm_pic_read_irq(struct kvm *kvm)
{
        int irq, irq2, intno;
        struct kvm_pic *s = pic_irqchip(kvm);

        pic_lock(s);
        irq = pic_get_irq(&s->pics[0]);
        if (irq >= 0) {
                pic_intack(&s->pics[0], irq);
                if (irq == 2) {
                        irq2 = pic_get_irq(&s->pics[1]);
                        if (irq2 >= 0)
                                pic_intack(&s->pics[1], irq2);
                        else
                                /*
                                 * spurious IRQ on slave controller
                                 */
                                irq2 = 7;
                        intno = s->pics[1].irq_base + irq2;
                        irq = irq2 + 8;
                } else
                        intno = s->pics[0].irq_base + irq;
        } else {
                /*
                 * spurious IRQ on host controller
                 */
                irq = 7;
                intno = s->pics[0].irq_base + irq;
        }
        pic_update_irq(s);
        pic_unlock(s);
        kvm_notify_acked_irq(kvm, SELECT_PIC(irq), irq);

        return intno;
}

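/*
 * Reset one of the two PICs to its power-on state.  IRQs that were still
 * raised or in service are recorded in pending_acks so that pic_unlock()
 * runs their ack notifiers, provided the vcpu would have accepted PIC
 * interrupts at all.
 */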
void kvm_pic_reset(struct kvm_kpic_state *s)
{
        int irq, irqbase, n;
        struct kvm *kvm = s->pics_state->irq_request_opaque;
        struct kvm_vcpu *vcpu0 = kvm->vcpus[0];

        if (s == &s->pics_state->pics[0])
                irqbase = 0;
        else
                irqbase = 8;

        for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
                if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
                        if (s->irr & (1 << irq) || s->isr & (1 << irq)) {
                                n = irq + irqbase;
                                s->pics_state->pending_acks |= 1 << n;
                        }
        }
        s->last_irr = 0;
        s->irr = 0;
        s->imr = 0;
        s->isr = 0;
        s->isr_ack = 0xff;
        s->priority_add = 0;
        s->irq_base = 0;
        s->read_reg_select = 0;
        s->poll = 0;
        s->special_mask = 0;
        s->init_state = 0;
        s->auto_eoi = 0;
        s->rotate_on_auto_eoi = 0;
        s->special_fully_nested_mode = 0;
        s->init4 = 0;
}

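/*
 * Handle guest writes to the PIC command (addr == 0) and data (addr == 1)
 * ports.  Command-port writes with bit 4 set start the ICW1..ICW4 init
 * sequence tracked by init_state; other command-port writes are decoded as
 * OCW2 (EOI and priority rotation) or OCW3 (poll/read-register/special-mask)
 * commands, and data-port writes outside an init sequence set the interrupt
 * mask.
 *
 * For illustration only (not taken from this file), a guest typically
 * programs the master PIC roughly as:
 *   outb(0x11, 0x20);   ICW1: init, ICW4 needed
 *   outb(0x08, 0x21);   ICW2: vector base
 *   outb(0x04, 0x21);   ICW3: slave cascaded on IRQ2
 *   outb(0x01, 0x21);   ICW4: 8086 mode
 */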
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        int priority, cmd, irq;

        addr &= 1;
        if (addr == 0) {
                if (val & 0x10) {
                        kvm_pic_reset(s);       /* init */
                        /*
                         * deassert a pending interrupt
                         */
                        s->pics_state->irq_request(s->pics_state->
                                                   irq_request_opaque, 0);
                        s->init_state = 1;
                        s->init4 = val & 1;
                        if (val & 0x02)
                                printk(KERN_ERR "single mode not supported\n");
                        if (val & 0x08)
                                printk(KERN_ERR
                                       "level sensitive irq not supported\n");
                } else if (val & 0x08) {
                        if (val & 0x04)
                                s->poll = 1;
                        if (val & 0x02)
                                s->read_reg_select = val & 1;
                        if (val & 0x40)
                                s->special_mask = (val >> 5) & 1;
                } else {
                        cmd = val >> 5;
                        switch (cmd) {
                        case 0:
                        case 4:
                                s->rotate_on_auto_eoi = cmd >> 2;
                                break;
                        case 1: /* end of interrupt */
                        case 5:
                                priority = get_priority(s, s->isr);
                                if (priority != 8) {
                                        irq = (priority + s->priority_add) & 7;
                                        pic_clear_isr(s, irq);
                                        if (cmd == 5)
                                                s->priority_add = (irq + 1) & 7;
                                        pic_update_irq(s->pics_state);
                                }
                                break;
                        case 3:
                                irq = val & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        case 6:
                                s->priority_add = (val + 1) & 7;
                                pic_update_irq(s->pics_state);
                                break;
                        case 7:
                                irq = val & 7;
                                s->priority_add = (irq + 1) & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        default:
                                break;  /* no operation */
                        }
                }
        } else
                switch (s->init_state) {
                case 0:         /* normal mode */
                        s->imr = val;
                        pic_update_irq(s->pics_state);
                        break;
                case 1:
                        s->irq_base = val & 0xf8;
                        s->init_state = 2;
                        break;
                case 2:
                        if (s->init4)
                                s->init_state = 3;
                        else
                                s->init_state = 0;
                        break;
                case 3:
                        s->special_fully_nested_mode = (val >> 4) & 1;
                        s->auto_eoi = (val >> 1) & 1;
                        s->init_state = 0;
                        break;
                }
}

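/*
 * Poll-mode read (OCW3 with the poll bit set): return and acknowledge the
 * highest-priority pending IRQ, or 0x07 if none is pending.  A read from
 * the slave's ports (addr1 bit 7 set) also clears the cascade IRQ2 on the
 * master.
 */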
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
        int ret;

        ret = pic_get_irq(s);
        if (ret >= 0) {
                if (addr1 >> 7) {
                        s->pics_state->pics[0].isr &= ~(1 << 2);
                        s->pics_state->pics[0].irr &= ~(1 << 2);
                }
                s->irr &= ~(1 << ret);
                pic_clear_isr(s, ret);
                if (addr1 >> 7 || ret != 2)
                        pic_update_irq(s->pics_state);
        } else {
                ret = 0x07;
                pic_update_irq(s->pics_state);
        }

        return ret;
}

static u32 pic_ioport_read(void *opaque, u32 addr1)
{
        struct kvm_kpic_state *s = opaque;
        unsigned int addr;
        int ret;

        addr = addr1;
        addr &= 1;
        if (s->poll) {
                ret = pic_poll_read(s, addr1);
                s->poll = 0;
        } else
                if (addr == 0)
                        if (s->read_reg_select)
                                ret = s->isr;
                        else
                                ret = s->irr;
                else
                        ret = s->imr;
        return ret;
}

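/*
 * The ELCR (edge/level control registers, ports 0x4d0/0x4d1) select
 * level-triggered mode per IRQ line; elcr_mask forces the lines that must
 * stay edge-triggered (e.g. timer, keyboard and cascade on the master) to
 * read back as zero.
 */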
static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
        struct kvm_kpic_state *s = opaque;
        return s->elcr;
}

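/*
 * PIO ranges claimed by the emulated PICs: 0x20/0x21 (master), 0xa0/0xa1
 * (slave) and 0x4d0/0x4d1 (ELCR).
 */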
static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
                           int len, int is_write)
{
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
        case 0x4d0:
        case 0x4d1:
                return 1;
        default:
                return 0;
        }
}

static void picdev_write(struct kvm_io_device *this,
                         gpa_t addr, int len, const void *val)
{
        struct kvm_pic *s = this->private;
        unsigned char data = *(unsigned char *)val;

        if (len != 1) {
                if (printk_ratelimit())
                        printk(KERN_ERR "PIC: non byte write\n");
                return;
        }
        pic_lock(s);
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                pic_ioport_write(&s->pics[addr >> 7], addr, data);
                break;
        case 0x4d0:
        case 0x4d1:
                elcr_ioport_write(&s->pics[addr & 1], addr, data);
                break;
        }
        pic_unlock(s);
}

static void picdev_read(struct kvm_io_device *this,
                        gpa_t addr, int len, void *val)
{
        struct kvm_pic *s = this->private;
        unsigned char data = 0;

        if (len != 1) {
                if (printk_ratelimit())
                        printk(KERN_ERR "PIC: non byte read\n");
                return;
        }
        pic_lock(s);
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                data = pic_ioport_read(&s->pics[addr >> 7], addr);
                break;
        case 0x4d0:
        case 0x4d1:
                data = elcr_ioport_read(&s->pics[addr & 1], addr);
                break;
        }
        *(unsigned char *)val = data;
        pic_unlock(s);
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(void *opaque, int level)
{
        struct kvm *kvm = opaque;
        struct kvm_vcpu *vcpu = kvm->vcpus[0];
        struct kvm_pic *s = pic_irqchip(kvm);
        int irq = pic_get_irq(&s->pics[0]);

        s->output = level;
        if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
                s->pics[0].isr_ack &= ~(1 << irq);
                s->wakeup_needed = true;
        }
}

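/*
 * Allocate and wire up the master/slave PIC pair and register it as a
 * single device on the guest's PIO bus.
 */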
struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
        struct kvm_pic *s;
        s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
        if (!s)
                return NULL;
        spin_lock_init(&s->lock);
        s->kvm = kvm;
        s->pics[0].elcr_mask = 0xf8;
        s->pics[1].elcr_mask = 0xde;
        s->irq_request = pic_irq_request;
        s->irq_request_opaque = kvm;
        s->pics[0].pics_state = s;
        s->pics[1].pics_state = s;

        /*
         * Initialize PIO device
         */
        s->dev.read = picdev_read;
        s->dev.write = picdev_write;
        s->dev.in_range = picdev_in_range;
        s->dev.private = s;
        kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev);
        return s;
}