drivers/xen/events/events_base.c
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
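
/*
 * Illustrative usage (a sketch, not an API defined here): a frontend
 * driver typically binds an interdomain event channel and gets an
 * ordinary Linux irq back:
 *
 *      irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *                                      "my-frontend", my_dev);
 *      ...
 *      unbind_from_irqhandler(irq, my_dev);
 *
 * "my_handler" and "my_dev" are hypothetical names; the helpers
 * themselves are defined later in this file.
 */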

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/xen/page.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
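
/*
 * evtchn_to_irq is a lazily-populated two-level table: an array of row
 * pointers sized for xen_evtchn_max_channels(), each row being one page
 * of ints covering EVTCHN_PER_ROW ports. Rows are allocated on first
 * use in set_evtchn_to_irq(), and absent entries read back as -1.
 */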

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static void clear_evtchn_to_irq_row(unsigned row)
{
        unsigned col;

        for (col = 0; col < EVTCHN_PER_ROW; col++)
                evtchn_to_irq[row][col] = -1;
}

static void clear_evtchn_to_irq_all(void)
{
        unsigned row;

        for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
                if (evtchn_to_irq[row] == NULL)
                        continue;
                clear_evtchn_to_irq_row(row);
        }
}

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
        unsigned row;
        unsigned col;

        if (evtchn >= xen_evtchn_max_channels())
                return -EINVAL;

        row = EVTCHN_ROW(evtchn);
        col = EVTCHN_COL(evtchn);

        if (evtchn_to_irq[row] == NULL) {
                /* Unallocated irq entries return -1 anyway */
                if (irq == -1)
                        return 0;

                evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
                if (evtchn_to_irq[row] == NULL)
                        return -ENOMEM;

                clear_evtchn_to_irq_row(row);
        }

        evtchn_to_irq[row][col] = irq;
        return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
        if (evtchn >= xen_evtchn_max_channels())
                return -1;
        if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
                return -1;
        return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
        return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
                                     unsigned irq,
                                     enum xen_irq_type type,
                                     unsigned evtchn,
                                     unsigned short cpu)
{
        int ret;

        BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

        info->type = type;
        info->irq = irq;
        info->evtchn = evtchn;
        info->cpu = cpu;

        ret = set_evtchn_to_irq(evtchn, irq);
        if (ret < 0)
                return ret;

        irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

        return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
                                     unsigned evtchn)
{
        struct irq_info *info = info_for_irq(irq);

        return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
                                  unsigned irq,
                                  unsigned evtchn,
                                  enum ipi_vector ipi)
{
        struct irq_info *info = info_for_irq(irq);

        info->u.ipi = ipi;

        per_cpu(ipi_to_irq, cpu)[ipi] = irq;

        return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
                                   unsigned irq,
                                   unsigned evtchn,
                                   unsigned virq)
{
        struct irq_info *info = info_for_irq(irq);

        info->u.virq = virq;

        per_cpu(virq_to_irq, cpu)[virq] = irq;

        return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
                                   unsigned evtchn,
                                   unsigned pirq,
                                   unsigned gsi,
                                   uint16_t domid,
                                   unsigned char flags)
{
        struct irq_info *info = info_for_irq(irq);

        info->u.pirq.pirq = pirq;
        info->u.pirq.gsi = gsi;
        info->u.pirq.domid = domid;
        info->u.pirq.flags = flags;

        return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
        set_evtchn_to_irq(info->evtchn, -1);
        info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
        if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
                return 0;

        return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
        return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
        return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);

        return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_VIRQ);

        return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
        return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
        return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        int irq = get_evtchn_to_irq(evtchn);
        unsigned ret = 0;

        if (irq != -1)
                ret = cpu_from_irq(irq);

        return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
        return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = get_evtchn_to_irq(chn);
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
#endif
        xen_evtchn_port_bind_to_cpu(info, cpu);

        info->cpu = cpu;
}

static void xen_evtchn_mask_all(void)
{
        unsigned int evtchn;

        for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
                mask_evtchn(evtchn);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void xen_irq_init(unsigned irq)
{
        struct irq_info *info;
#ifdef CONFIG_SMP
        /* By default all event channels notify CPU#0. */
        cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
#endif

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                panic("Unable to allocate metadata for IRQ%d\n", irq);

        info->type = IRQT_UNBOUND;
        info->refcnt = -1;

        irq_set_handler_data(irq, info);

        list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
        int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

        if (irq >= 0) {
                for (i = 0; i < nvec; i++)
                        xen_irq_init(irq + i);
        }

        return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
        return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
        int irq;

        /*
         * A PV guest has no concept of a GSI (since it has no ACPI
         * nor access to/knowledge of the physical APICs). Therefore
         * all IRQs are dynamically allocated from the entire IRQ
         * space.
         */
        if (xen_pv_domain() && !xen_initial_domain())
                return xen_allocate_irq_dynamic();

        /* Legacy IRQ descriptors are already allocated by the arch. */
        if (gsi < NR_IRQS_LEGACY)
                irq = gsi;
        else
                irq = irq_alloc_desc_at(gsi, -1);

        xen_irq_init(irq);

        return irq;
}

static void xen_free_irq(unsigned irq)
{
        struct irq_info *info = irq_get_handler_data(irq);

        if (WARN_ON(!info))
                return;

        list_del(&info->list);

        irq_set_handler_data(irq, NULL);

        WARN_ON(info->refcnt > 0);

        kfree(info);

        /* Legacy IRQ descriptors are managed by the arch. */
        if (irq < NR_IRQS_LEGACY)
                return;

        irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
        struct evtchn_close close;

        close.port = port;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();
}

static void pirq_query_unmask(int irq)
{
        struct physdev_irq_status_query irq_status;
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        irq_status.irq = pirq_from_irq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;

        info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);
        struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
        int rc = 0;

        irq_move_irq(data);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);

        if (pirq_needs_eoi(data->irq)) {
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                WARN_ON(rc);
        }
}

static void mask_ack_pirq(struct irq_data *data)
{
        disable_dynirq(data);
        eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);
        int rc;

        BUG_ON(info->type != IRQT_PIRQ);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = pirq_from_irq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
                                        BIND_PIRQ__WILL_SHARE : 0;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (rc != 0) {
                pr_warn("Failed to obtain physical IRQ %d\n", irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq);

        rc = set_evtchn_to_irq(evtchn, irq);
        if (rc)
                goto err;

        bind_evtchn_to_cpu(evtchn, 0);
        info->evtchn = evtchn;

        rc = xen_evtchn_port_setup(info);
        if (rc)
                goto err;

out:
        unmask_evtchn(evtchn);
        eoi_pirq(irq_get_irq_data(irq));

        return 0;

err:
        pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
        xen_evtchn_close(evtchn);
        return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
        return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
        unsigned int irq = data->irq;
        struct irq_info *info = info_for_irq(irq);
        unsigned evtchn = evtchn_from_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);
        xen_evtchn_close(evtchn);
        xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
        startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
        disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
        struct irq_info *info;

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;

                if (info->u.pirq.gsi == gsi)
                        return info->irq;
        }

        return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct irq_info *info = irq_get_handler_data(irq);

        if (info->refcnt > 0) {
                info->refcnt--;
                if (info->refcnt != 0)
                        return;
        }

        if (VALID_EVTCHN(evtchn)) {
                unsigned int cpu = cpu_from_irq(irq);

                xen_evtchn_close(evtchn);

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                xen_irq_info_cleanup(info);
        }

        BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

        xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up. Return an existing irq if we've already got one for the
 * gsi.
 *
 * Shareable implies level triggered; not shareable implies edge
 * triggered.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                             unsigned pirq, int shareable, char *name)
{
        int irq = -1;
        struct physdev_irq irq_op;
        int ret;

        mutex_lock(&irq_mapping_update_lock);

        irq = xen_irq_from_gsi(gsi);
        if (irq != -1) {
                pr_info("%s: returning irq %d for gsi %u\n",
                        __func__, irq, gsi);
                goto out;
        }

        irq = xen_allocate_irq_gsi(gsi);
        if (irq < 0)
                goto out;

        irq_op.irq = irq;
        irq_op.vector = 0;

        /* Only the privileged domain can do this. For an unprivileged
         * domain, the pcifront driver provides a PCI bus whose backend
         * in the privileged domain makes this call on its behalf. */
        if (xen_initial_domain() &&
            HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
                xen_free_irq(irq);
                irq = -ENOSPC;
                goto out;
        }

        ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
                                      shareable ? PIRQ_SHAREABLE : 0);
        if (ret < 0) {
                __unbind_from_irq(irq);
                irq = ret;
                goto out;
        }

        pirq_query_unmask(irq);
        /* We try to use the handler with the appropriate semantic for the
         * type of interrupt: if the interrupt is an edge triggered
         * interrupt we use handle_edge_irq.
         *
         * On the other hand if the interrupt is level triggered we use
         * handle_fasteoi_irq like the native code does for this kind of
         * interrupts.
         *
         * Depending on the Xen version, pirq_needs_eoi might return true
         * not only for level triggered interrupts but for edge triggered
         * interrupts too. In any case Xen always honors the eoi mechanism,
         * not injecting any more pirqs of the same kind if the first one
         * hasn't received an eoi yet. Therefore using the fasteoi handler
         * is the right choice either way.
         */
        if (shareable)
                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
                                handle_fasteoi_irq, name);
        else
                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
                                handle_edge_irq, name);

out:
        mutex_unlock(&irq_mapping_update_lock);

        return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
        int rc;
        struct physdev_get_free_pirq op_get_free_pirq;

        op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

        WARN_ONCE(rc == -ENOSYS,
                  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

        return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                             int pirq, int nvec, const char *name, domid_t domid)
{
        int i, irq, ret;

        mutex_lock(&irq_mapping_update_lock);

        irq = xen_allocate_irqs_dynamic(nvec);
        if (irq < 0)
                goto out;

        for (i = 0; i < nvec; i++) {
                irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

                ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
                                              i == 0 ? 0 : PIRQ_MSI_GROUP);
                if (ret < 0)
                        goto error_irq;
        }

        ret = irq_set_msi_desc(irq, msidesc);
        if (ret < 0)
                goto error_irq;
out:
        mutex_unlock(&irq_mapping_update_lock);
        return irq;
error_irq:
        for (; i >= 0; i--)
                __unbind_from_irq(irq + i);
        mutex_unlock(&irq_mapping_update_lock);
        return ret;
}
#endif

int xen_destroy_irq(int irq)
{
        struct physdev_unmap_pirq unmap_irq;
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;

        mutex_lock(&irq_mapping_update_lock);

        /*
         * If trying to remove a vector that is part of an MSI group,
         * skip the PIRQ unmap unless this vector is the first one in
         * the group.
         */
        if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
                unmap_irq.pirq = info->u.pirq.pirq;
                unmap_irq.domid = info->u.pirq.domid;
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
                /* If another domain quits without making the pci_disable_msix
                 * call, the Xen hypervisor takes care of freeing the PIRQs
                 * (free_domain_pirqs).
                 */
                if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
                        pr_info("domain %d does not have %d anymore\n",
                                info->u.pirq.domid, info->u.pirq.pirq);
                else if (rc) {
                        pr_warn("unmap irq failed %d\n", rc);
                        goto out;
                }
        }

        xen_free_irq(irq);

out:
        mutex_unlock(&irq_mapping_update_lock);
        return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
        int irq;

        struct irq_info *info;

        mutex_lock(&irq_mapping_update_lock);

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;
                irq = info->irq;
                if (info->u.pirq.pirq == pirq)
                        goto out;
        }
        irq = -1;
out:
        mutex_unlock(&irq_mapping_update_lock);

        return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
        return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;
        int ret;

        if (evtchn >= xen_evtchn_max_channels())
                return -ENOMEM;

        mutex_lock(&irq_mapping_update_lock);

        irq = get_evtchn_to_irq(evtchn);

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_edge_irq, "event");

                ret = xen_irq_info_evtchn_setup(irq, evtchn);
                if (ret < 0) {
                        __unbind_from_irq(irq);
                        irq = ret;
                        goto out;
                }
                /* New interdomain events are bound to VCPU 0. */
                bind_evtchn_to_cpu(evtchn, 0);
        } else {
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
        }

out:
        mutex_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;
        int ret;

        mutex_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
                if (ret < 0) {
                        __unbind_from_irq(irq);
                        irq = ret;
                        goto out;
                }
                bind_evtchn_to_cpu(evtchn, cpu);
        } else {
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_IPI);
        }

out:
        mutex_unlock(&irq_mapping_update_lock);
        return irq;
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                   unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

static int find_virq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_status status;
        int port, rc = -ENOENT;

        memset(&status, 0, sizeof(status));
        for (port = 0; port < xen_evtchn_max_channels(); port++) {
                status.dom = DOMID_SELF;
                status.port = port;
                rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
                if (rc < 0)
                        continue;
                if (status.status != EVTCHNSTAT_virq)
                        continue;
                if (status.u.virq == virq && status.vcpu == cpu) {
                        rc = port;
                        break;
                }
        }
        return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
        return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq, ret;

        mutex_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                  &bind_virq);
                if (ret == 0)
                        evtchn = bind_virq.port;
                else {
                        if (ret == -EEXIST)
                                ret = find_virq(virq, cpu);
                        BUG_ON(ret < 0);
                        evtchn = ret;
                }

                ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
                if (ret < 0) {
                        __unbind_from_irq(irq);
                        irq = ret;
                        goto out;
                }

                bind_evtchn_to_cpu(evtchn, cpu);
        } else {
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_VIRQ);
        }

out:
        mutex_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        mutex_lock(&irq_mapping_update_lock);
        __unbind_from_irq(irq);
        mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        int irq, retval;

        irq = bind_evtchn_to_irq(evtchn);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
                                          unsigned int remote_port,
                                          irq_handler_t handler,
                                          unsigned long irqflags,
                                          const char *devname,
                                          void *dev_id)
{
        int irq, retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        int irq, retval;

        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
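
/*
 * Illustrative sketch: the Xen clocksource code binds its per-cpu
 * timer VIRQ in roughly this shape ("xen_timer_interrupt" is a
 * hypothetical handler name here):
 *
 *      irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *                                    IRQF_PERCPU | IRQF_NOBALANCING,
 *                                    "timer", NULL);
 */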

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        struct irq_info *info = irq_get_handler_data(irq);

        if (WARN_ON(!info))
                return;
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
        struct evtchn_set_priority set_priority;

        set_priority.port = evtchn_from_irq(irq);
        set_priority.priority = priority;

        return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
                                           &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);
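
/*
 * Illustrative sketch: callers should treat failure as advisory. To
 * our understanding, EVTCHNOP_set_priority is only implemented by the
 * FIFO event channel ABI, so the hypercall fails under the 2-level
 * ABI:
 *
 *      if (xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX))
 *              pr_info("event channel priorities not supported\n");
 */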

int evtchn_make_refcounted(unsigned int evtchn)
{
        int irq = get_evtchn_to_irq(evtchn);
        struct irq_info *info;

        if (irq == -1)
                return -ENOENT;

        info = irq_get_handler_data(irq);

        if (!info)
                return -ENOENT;

        WARN_ON(info->refcnt != -1);

        info->refcnt = 1;

        return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
        int irq;
        struct irq_info *info;
        int err = -ENOENT;

        if (evtchn >= xen_evtchn_max_channels())
                return -EINVAL;

        mutex_lock(&irq_mapping_update_lock);

        irq = get_evtchn_to_irq(evtchn);
        if (irq == -1)
                goto done;

        info = irq_get_handler_data(irq);

        if (!info)
                goto done;

        err = -EINVAL;
        if (info->refcnt <= 0)
                goto done;

        info->refcnt++;
        err = 0;
done:
        mutex_unlock(&irq_mapping_update_lock);

        return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
        int irq = get_evtchn_to_irq(evtchn);

        if (WARN_ON(irq == -1))
                return;
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq;

#ifdef CONFIG_X86
        if (unlikely(vector == XEN_NMI_VECTOR)) {
                int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);

                if (rc < 0)
                        printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
                return;
        }
#endif
        irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

static void __xen_evtchn_do_upcall(void)
{
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
        int cpu = get_cpu();
        unsigned count;

        do {
                vcpu_info->evtchn_upcall_pending = 0;

                if (__this_cpu_inc_return(xed_nesting_count) - 1)
                        goto out;

                xen_evtchn_handle_events(cpu);

                BUG_ON(!irqs_disabled());

                count = __this_cpu_read(xed_nesting_count);
                __this_cpu_write(xed_nesting_count, 0);
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
        put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();
#ifdef CONFIG_X86
        exit_idle();
        inc_irq_stat(irq_hv_callback_count);
#endif

        __xen_evtchn_do_upcall();

        irq_exit();
        set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
        __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        struct irq_info *info = info_for_irq(irq);

        if (WARN_ON(!info))
                return;

        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        mutex_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(get_evtchn_to_irq(evtchn) != -1);
        /* Expect irq to have been bound before,
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

        (void)xen_irq_info_evtchn_setup(irq, evtchn);

        mutex_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);
        int masked;

        if (!VALID_EVTCHN(evtchn))
                return -1;

        /*
         * Events delivered via platform PCI interrupts are always
         * routed to vcpu 0 and hence cannot be rebound.
         */
        if (xen_hvm_domain() && !xen_have_vector_callback)
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * Mask the event while changing the VCPU binding to prevent
         * it being delivered on an unexpected VCPU.
         */
        masked = test_and_set_mask(evtchn);

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        if (!masked)
                unmask_evtchn(evtchn);

        return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                            bool force)
{
        unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);

        return rebind_irq_to_cpu(data->irq, tcpu);
}

static void enable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        irq_move_irq(data);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
        disable_dynirq(data);
        ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
        unsigned int evtchn = evtchn_from_irq(data->irq);
        int masked;

        if (!VALID_EVTCHN(evtchn))
                return 0;

        masked = test_and_set_mask(evtchn);
        set_evtchn(evtchn);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

static void restore_pirqs(void)
{
        int pirq, rc, irq, gsi;
        struct physdev_map_pirq map_irq;
        struct irq_info *info;

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;

                pirq = info->u.pirq.pirq;
                gsi = info->u.pirq.gsi;
                irq = info->irq;

                /* save/restore of PT devices doesn't work, so at this point the
                 * only devices present are GSI based emulated devices */
                if (!gsi)
                        continue;

                map_irq.domid = DOMID_SELF;
                map_irq.type = MAP_PIRQ_TYPE_GSI;
                map_irq.index = gsi;
                map_irq.pirq = pirq;

                rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
                if (rc) {
                        pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
                                gsi, irq, pirq, rc);
                        xen_free_irq(irq);
                        continue;
                }

                printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

                __startup_pirq(irq);
        }
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending with timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = timeout;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
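
/*
 * Typical polling pattern (a sketch; the PV spinlock code uses roughly
 * this shape, and "condition_we_wait_for" is a hypothetical predicate).
 * The poll may return spuriously, so the condition is rechecked each
 * iteration:
 *
 *      xen_clear_irq_pending(irq);
 *      while (!condition_we_wait_for())
 *              xen_poll_irq(irq);
 */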

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
        struct irq_info *info = info_for_irq(irq);
        struct physdev_irq_status_query irq_status;

        if (WARN_ON(!info))
                return -ENOENT;

        irq_status.irq = info->u.pirq.pirq;

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                return 0;
        return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
        unsigned int cpu;
        struct irq_info *info;

        /* New event-channel space is not 'live' yet. */
        xen_evtchn_mask_all();
        xen_evtchn_resume();

        /* No IRQ <-> event-channel mappings. */
        list_for_each_entry(info, &xen_irq_list_head, list)
                info->evtchn = 0; /* zap event-channel binding */

        clear_evtchn_to_irq_all();

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }

        restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name                   = "xen-dyn",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
        .irq_mask_ack           = mask_ack_dynirq,

        .irq_set_affinity       = set_affinity_irq,
        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
        .name                   = "xen-pirq",

        .irq_startup            = startup_pirq,
        .irq_shutdown           = shutdown_pirq,
        .irq_enable             = enable_pirq,
        .irq_disable            = disable_pirq,

        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = eoi_pirq,
        .irq_eoi                = eoi_pirq,
        .irq_mask_ack           = mask_ack_pirq,

        .irq_set_affinity       = set_affinity_irq,

        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
        .name                   = "xen-percpu",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;

        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;

        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        pr_err("Request for Xen HVM callback vector failed\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                pr_info("Xen HVM callback vector for event delivery is enabled\n");
                /* in the restore case the vector has already been allocated */
                if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
                        alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
                                        xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);
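
/*
 * With MODULE_PARAM_PREFIX set to "xen." above, booting with
 * "xen.fifo_events=0" forces the 2-level event channel ABI.
 */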

void __init xen_init_IRQ(void)
{
        int ret = -EINVAL;

        if (fifo_events)
                ret = xen_evtchn_fifo_init();
        if (ret < 0)
                xen_evtchn_2l_init();

        evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
                                sizeof(*evtchn_to_irq), GFP_KERNEL);
        BUG_ON(!evtchn_to_irq);

        /* No event channels are 'live' right now. */
        xen_evtchn_mask_all();

        pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
        if (xen_pv_domain()) {
                irq_ctx_init(smp_processor_id());
                if (xen_initial_domain())
                        pci_xen_initial_domain();
        }
        if (xen_feature(XENFEAT_hvm_callback_vector))
                xen_callback_vector();

        if (xen_hvm_domain()) {
                native_init_IRQ();
                /* pci_xen_hvm_init must be called after native_init_IRQ so that
                 * __acpi_register_gsi can point at the right function */
                pci_xen_hvm_init();
        } else {
                int rc;
                struct physdev_pirq_eoi_gmfn eoi_gmfn;

                pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
                eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
                /* TODO: No PVH support for PIRQ EOI */
                if (rc != 0) {
                        free_page((unsigned long) pirq_eoi_map);
                        pirq_eoi_map = NULL;
                } else
                        pirq_needs_eoi = pirq_check_eoi_map;
        }
#endif
}