/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
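
/*
 * Illustrative sketch of the driver-facing flow (the names "my_handler"
 * and "evtchn" are placeholders; the port itself would be negotiated
 * out of band, e.g. via xenstore):
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					    "my-frontend", NULL);
 *	if (irq >= 0)
 *		notify_remote_via_irq(irq);
 *	...
 *	unbind_from_irqhandler(irq, NULL);
 */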

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays.  The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)

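/*
 * Global mapping tables: irq_info[] holds the packed per-irq state
 * described above, evtchn_to_irq[] maps an event-channel port back to
 * its irq, and cpu_evtchn_mask_p records which event channels are
 * bound to each cpu (one bit per port).
 */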
static struct irq_info *irq_info;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructors for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}
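
/*
 * For instance, mk_virq_info(evtchn, VIRQ_TIMER) packs down to
 * { .type = IRQT_VIRQ, .evtchn = evtchn, .cpu = 0, .u.virq = VIRQ_TIMER },
 * which is everything needed to route that port back to the timer virq.
 */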

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

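/*
 * An event channel is "active" on a given cpu iff it is pending, bound
 * to that cpu, and not masked; idx selects one word of each bitmap.
 */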
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore.  Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
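
/*
 * For example, a virtual device backend might call
 * notify_remote_via_irq() after placing a response on a shared ring,
 * telling the frontend in the other domain to look at it.
 */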

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'.  Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}

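/*
 * Find an irq to hand out: first scan for an existing xen-dynamic irq
 * that has fallen back to IRQT_UNBOUND and can be reused, otherwise
 * allocate a fresh descriptor at the first untouched slot.
 */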
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;

	for (irq = 0; irq < nr_irqs; irq++) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq >= 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;
}

static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = irq };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}

static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

/*
 * Allocate a physical irq, along with a vector.  We don't assign an
 * event channel until the irq is actually started up.  Return an
 * existing irq if we've already got one for the gsi.
 */
int xen_allocate_pirq(unsigned gsi)
{
	int irq;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_allocate_pirq: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	if (identity_mapped_irq(gsi)) {
		irq = gsi;
		irq_to_desc_alloc_node(irq, 0);
		dynamic_irq_init(irq);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, "pirq");

	irq_op.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		dynamic_irq_cleanup(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, gsi, irq_op.vector);

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
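
/*
 * For illustration, a (hypothetical) caller wiring up a legacy device
 * on GSI 9 might do, with error handling elided:
 *
 *	int irq = xen_allocate_pirq(9);
 *	if (irq >= 0)
 *		request_irq(irq, my_isr, IRQF_SHARED, "mydev", dev);
 *
 * The event channel itself is only bound later, in startup_pirq().
 */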

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
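
/*
 * This is the usual entry point for frontend drivers: one call both
 * maps the event channel to an irq and installs the handler.  A block
 * frontend that read "event-channel" from xenstore might do (sketch,
 * names hypothetical):
 *
 *	err = bind_evtchn_to_irqhandler(evtchn, blkif_interrupt,
 *					0, "blkif", info);
 *	if (err < 0)
 *		goto fail;
 *	info->irq = err;
 */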

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
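
/*
 * VIRQs are per-cpu, so callers bind them once per cpu.  For example,
 * the debug interrupt below is typically hooked up along the lines of:
 *
 *	bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
 *				IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
 *				"xen-dbg", NULL);
 */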

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
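/*
 * For example: if bit 1 of evtchn_pending_sel is set, then word 1 of
 * evtchn_pending holds at least one active port; if bit 3 of that word
 * is set, the pending port is 1 * BITS_PER_LONG + 3, and
 * evtchn_to_irq[port] yields the irq whose handler should run.
 */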
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound.  Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
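
/*
 * A typical polling sequence (e.g. a paravirtualized spinlock slow
 * path waiting for a kick on its per-cpu irq) might look like:
 *
 *	xen_clear_irq_pending(irq);
 *	while (!condition)
 *		xen_poll_irq(irq);	- blocks in Xen until irq pends
 */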

void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
			       " failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
		       "enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}