xen/events: move 2-level specific code into its own file
drivers/xen/events/events_base.c
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
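
/*
 * The binding helpers below map each of these onto a Linux irq:
 * bind_evtchn_to_irqhandler() for inter-domain event channels,
 * bind_virq_to_irqhandler() for VIRQs, bind_ipi_to_irqhandler() for IPIs,
 * and xen_bind_pirq_gsi_to_irq()/xen_bind_pirq_msi_to_irq() for PIRQs.
 */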

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/xen/page.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"
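
/*
 * events_internal.h declares the interface to the event channel ABI
 * handling used below (e.g. xen_evtchn_port_bind_to_cpu() and
 * xen_evtchn_handle_events()); the 2-level specific implementation now
 * lives in its own file behind that interface.
 */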

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int *evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

9158c358 93/* Get info for IRQ */
9a489f45 94struct irq_info *info_for_irq(unsigned irq)
ced40d0f 95{
c442b806 96 return irq_get_handler_data(irq);
ced40d0f
JF
97}
98
9158c358
IC
99/* Constructors for packed IRQ information. */
100static void xen_irq_info_common_init(struct irq_info *info,
3d4cfa37 101 unsigned irq,
9158c358
IC
102 enum xen_irq_type type,
103 unsigned short evtchn,
104 unsigned short cpu)
ced40d0f 105{
9158c358
IC
106
107 BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
108
109 info->type = type;
6cb6537d 110 info->irq = irq;
9158c358
IC
111 info->evtchn = evtchn;
112 info->cpu = cpu;
3d4cfa37
IC
113
114 evtchn_to_irq[evtchn] = irq;
934f585e
JG
115
116 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
ced40d0f
JF
117}
118
9158c358
IC
119static void xen_irq_info_evtchn_init(unsigned irq,
120 unsigned short evtchn)
ced40d0f 121{
9158c358
IC
122 struct irq_info *info = info_for_irq(irq);
123
3d4cfa37 124 xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
ced40d0f
JF
125}
126
3d4cfa37
IC
127static void xen_irq_info_ipi_init(unsigned cpu,
128 unsigned irq,
9158c358
IC
129 unsigned short evtchn,
130 enum ipi_vector ipi)
e46cdb66 131{
9158c358
IC
132 struct irq_info *info = info_for_irq(irq);
133
3d4cfa37 134 xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
9158c358
IC
135
136 info->u.ipi = ipi;
3d4cfa37
IC
137
138 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
ced40d0f
JF
139}
140
3d4cfa37
IC
141static void xen_irq_info_virq_init(unsigned cpu,
142 unsigned irq,
9158c358
IC
143 unsigned short evtchn,
144 unsigned short virq)
ced40d0f 145{
9158c358
IC
146 struct irq_info *info = info_for_irq(irq);
147
3d4cfa37 148 xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
9158c358
IC
149
150 info->u.virq = virq;
3d4cfa37
IC
151
152 per_cpu(virq_to_irq, cpu)[virq] = irq;
ced40d0f
JF
153}
154
9158c358
IC
155static void xen_irq_info_pirq_init(unsigned irq,
156 unsigned short evtchn,
157 unsigned short pirq,
158 unsigned short gsi,
beafbdc1 159 uint16_t domid,
9158c358 160 unsigned char flags)
ced40d0f 161{
9158c358
IC
162 struct irq_info *info = info_for_irq(irq);
163
3d4cfa37 164 xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
9158c358
IC
165
166 info->u.pirq.pirq = pirq;
167 info->u.pirq.gsi = gsi;
beafbdc1 168 info->u.pirq.domid = domid;
9158c358 169 info->u.pirq.flags = flags;
e46cdb66
JF
170}
171
172/*
173 * Accessors for packed IRQ information.
174 */
9a489f45 175unsigned int evtchn_from_irq(unsigned irq)
e46cdb66 176{
110e7c7e
JJ
177 if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
178 return 0;
179
ced40d0f 180 return info_for_irq(irq)->evtchn;
e46cdb66
JF
181}
182
d4c04536
IC
183unsigned irq_from_evtchn(unsigned int evtchn)
184{
185 return evtchn_to_irq[evtchn];
186}
187EXPORT_SYMBOL_GPL(irq_from_evtchn);
188
9a489f45
DV
189int irq_from_virq(unsigned int cpu, unsigned int virq)
190{
191 return per_cpu(virq_to_irq, cpu)[virq];
192}
193
ced40d0f 194static enum ipi_vector ipi_from_irq(unsigned irq)
e46cdb66 195{
ced40d0f
JF
196 struct irq_info *info = info_for_irq(irq);
197
198 BUG_ON(info == NULL);
199 BUG_ON(info->type != IRQT_IPI);
200
201 return info->u.ipi;
202}
203
204static unsigned virq_from_irq(unsigned irq)
205{
206 struct irq_info *info = info_for_irq(irq);
207
208 BUG_ON(info == NULL);
209 BUG_ON(info->type != IRQT_VIRQ);
210
211 return info->u.virq;
212}
213
7a043f11
SS
214static unsigned pirq_from_irq(unsigned irq)
215{
216 struct irq_info *info = info_for_irq(irq);
217
218 BUG_ON(info == NULL);
219 BUG_ON(info->type != IRQT_PIRQ);
220
221 return info->u.pirq.pirq;
222}
223
ced40d0f
JF
224static enum xen_irq_type type_from_irq(unsigned irq)
225{
226 return info_for_irq(irq)->type;
227}
228
9a489f45 229unsigned cpu_from_irq(unsigned irq)
ced40d0f
JF
230{
231 return info_for_irq(irq)->cpu;
232}
233
9a489f45 234unsigned int cpu_from_evtchn(unsigned int evtchn)
ced40d0f
JF
235{
236 int irq = evtchn_to_irq[evtchn];
237 unsigned ret = 0;
238
239 if (irq != -1)
240 ret = cpu_from_irq(irq);
241
242 return ret;
e46cdb66
JF
243}
244
bf86ad80 245#ifdef CONFIG_X86
9846ff10 246static bool pirq_check_eoi_map(unsigned irq)
d46a78b0 247{
521394e4 248 return test_bit(pirq_from_irq(irq), pirq_eoi_map);
9846ff10 249}
bf86ad80 250#endif
d46a78b0 251
9846ff10
SS
252static bool pirq_needs_eoi_flag(unsigned irq)
253{
254 struct irq_info *info = info_for_irq(irq);
d46a78b0
JF
255 BUG_ON(info->type != IRQT_PIRQ);
256
257 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
258}
259
e46cdb66
JF
260static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
261{
262 int irq = evtchn_to_irq[chn];
9a489f45 263 struct irq_info *info = info_for_irq(irq);
e46cdb66
JF
264
265 BUG_ON(irq == -1);
266#ifdef CONFIG_SMP
c9e265e0 267 cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
e46cdb66
JF
268#endif
269
9a489f45 270 xen_evtchn_port_bind_to_cpu(info, cpu);
168d2f46 271
9a489f45 272 info->cpu = cpu;
3f70fa82
WL
273}
274
e46cdb66
JF
275/**
276 * notify_remote_via_irq - send event to remote end of event channel via irq
277 * @irq: irq of event channel to send event to
278 *
279 * Unlike notify_remote_via_evtchn(), this is safe to use across
280 * save/restore. Notifications on a broken connection are silently
281 * dropped.
282 */
283void notify_remote_via_irq(int irq)
284{
285 int evtchn = evtchn_from_irq(irq);
286
287 if (VALID_EVTCHN(evtchn))
288 notify_remote_via_evtchn(evtchn);
289}
290EXPORT_SYMBOL_GPL(notify_remote_via_irq);
291
6cb6537d
IC
292static void xen_irq_init(unsigned irq)
293{
294 struct irq_info *info;
b5328cd1 295#ifdef CONFIG_SMP
6cb6537d
IC
296 struct irq_desc *desc = irq_to_desc(irq);
297
298 /* By default all event channels notify CPU#0. */
299 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
44626e4a 300#endif
6cb6537d 301
ca62ce8c
IC
302 info = kzalloc(sizeof(*info), GFP_KERNEL);
303 if (info == NULL)
304 panic("Unable to allocate metadata for IRQ%d\n", irq);
6cb6537d
IC
305
306 info->type = IRQT_UNBOUND;
420eb554 307 info->refcnt = -1;
6cb6537d 308
c442b806 309 irq_set_handler_data(irq, info);
ca62ce8c 310
6cb6537d
IC
311 list_add_tail(&info->list, &xen_irq_list_head);
312}
313
7bee9768 314static int __must_check xen_allocate_irq_dynamic(void)
0794bfc7 315{
89911501
IC
316 int first = 0;
317 int irq;
0794bfc7
KRW
318
319#ifdef CONFIG_X86_IO_APIC
89911501
IC
320 /*
321 * For an HVM guest or domain 0 which see "real" (emulated or
25985edc 322 * actual respectively) GSIs we allocate dynamic IRQs
89911501
IC
323 * e.g. those corresponding to event channels or MSIs
324 * etc. from the range above those "real" GSIs to avoid
325 * collisions.
326 */
327 if (xen_initial_domain() || xen_hvm_domain())
328 first = get_nr_irqs_gsi();
0794bfc7
KRW
329#endif
330
89911501 331 irq = irq_alloc_desc_from(first, -1);
3a69e916 332
e6599225
KRW
333 if (irq >= 0)
334 xen_irq_init(irq);
ced40d0f 335
e46cdb66 336 return irq;
d46a78b0
JF
337}
338
7bee9768 339static int __must_check xen_allocate_irq_gsi(unsigned gsi)
c9df1ce5
IC
340{
341 int irq;
342
89911501
IC
343 /*
344 * A PV guest has no concept of a GSI (since it has no ACPI
345 * nor access to/knowledge of the physical APICs). Therefore
346 * all IRQs are dynamically allocated from the entire IRQ
347 * space.
348 */
349 if (xen_pv_domain() && !xen_initial_domain())
c9df1ce5
IC
350 return xen_allocate_irq_dynamic();
351
352 /* Legacy IRQ descriptors are already allocated by the arch. */
353 if (gsi < NR_IRQS_LEGACY)
6cb6537d
IC
354 irq = gsi;
355 else
356 irq = irq_alloc_desc_at(gsi, -1);
c9df1ce5 357
6cb6537d 358 xen_irq_init(irq);
c9df1ce5
IC
359
360 return irq;
361}
362
363static void xen_free_irq(unsigned irq)
364{
c442b806 365 struct irq_info *info = irq_get_handler_data(irq);
6cb6537d 366
94032c50
KRW
367 if (WARN_ON(!info))
368 return;
369
6cb6537d 370 list_del(&info->list);
9158c358 371
c442b806 372 irq_set_handler_data(irq, NULL);
ca62ce8c 373
420eb554
DDG
374 WARN_ON(info->refcnt > 0);
375
ca62ce8c
IC
376 kfree(info);
377
72146104
IC
378 /* Legacy IRQ descriptors are managed by the arch. */
379 if (irq < NR_IRQS_LEGACY)
380 return;
381
c9df1ce5
IC
382 irq_free_desc(irq);
383}
384
d46a78b0
JF
385static void pirq_query_unmask(int irq)
386{
387 struct physdev_irq_status_query irq_status;
388 struct irq_info *info = info_for_irq(irq);
389
390 BUG_ON(info->type != IRQT_PIRQ);
391
7a043f11 392 irq_status.irq = pirq_from_irq(irq);
d46a78b0
JF
393 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
394 irq_status.flags = 0;
395
396 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
397 if (irq_status.flags & XENIRQSTAT_needs_eoi)
398 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
399}
400
401static bool probing_irq(int irq)
402{
403 struct irq_desc *desc = irq_to_desc(irq);
404
405 return desc && desc->action == NULL;
406}
407
7e186bdd
SS
408static void eoi_pirq(struct irq_data *data)
409{
410 int evtchn = evtchn_from_irq(data->irq);
411 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
412 int rc = 0;
413
414 irq_move_irq(data);
415
416 if (VALID_EVTCHN(evtchn))
417 clear_evtchn(evtchn);
418
419 if (pirq_needs_eoi(data->irq)) {
420 rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
421 WARN_ON(rc);
422 }
423}
424
425static void mask_ack_pirq(struct irq_data *data)
426{
427 disable_dynirq(data);
428 eoi_pirq(data);
429}
430
c9e265e0 431static unsigned int __startup_pirq(unsigned int irq)
d46a78b0
JF
432{
433 struct evtchn_bind_pirq bind_pirq;
434 struct irq_info *info = info_for_irq(irq);
435 int evtchn = evtchn_from_irq(irq);
15ebbb82 436 int rc;
d46a78b0
JF
437
438 BUG_ON(info->type != IRQT_PIRQ);
439
440 if (VALID_EVTCHN(evtchn))
441 goto out;
442
7a043f11 443 bind_pirq.pirq = pirq_from_irq(irq);
d46a78b0 444 /* NB. We are happy to share unless we are probing. */
15ebbb82
KRW
445 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
446 BIND_PIRQ__WILL_SHARE : 0;
447 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
448 if (rc != 0) {
d46a78b0 449 if (!probing_irq(irq))
283c0972 450 pr_info("Failed to obtain physical IRQ %d\n", irq);
d46a78b0
JF
451 return 0;
452 }
453 evtchn = bind_pirq.port;
454
455 pirq_query_unmask(irq);
456
457 evtchn_to_irq[evtchn] = irq;
458 bind_evtchn_to_cpu(evtchn, 0);
459 info->evtchn = evtchn;
460
461out:
462 unmask_evtchn(evtchn);
7e186bdd 463 eoi_pirq(irq_get_irq_data(irq));
d46a78b0
JF
464
465 return 0;
466}
467
c9e265e0
TG
468static unsigned int startup_pirq(struct irq_data *data)
469{
470 return __startup_pirq(data->irq);
471}
472
473static void shutdown_pirq(struct irq_data *data)
d46a78b0
JF
474{
475 struct evtchn_close close;
c9e265e0 476 unsigned int irq = data->irq;
d46a78b0
JF
477 struct irq_info *info = info_for_irq(irq);
478 int evtchn = evtchn_from_irq(irq);
479
480 BUG_ON(info->type != IRQT_PIRQ);
481
482 if (!VALID_EVTCHN(evtchn))
483 return;
484
485 mask_evtchn(evtchn);
486
487 close.port = evtchn;
488 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
489 BUG();
490
491 bind_evtchn_to_cpu(evtchn, 0);
492 evtchn_to_irq[evtchn] = -1;
493 info->evtchn = 0;
494}
495
c9e265e0 496static void enable_pirq(struct irq_data *data)
d46a78b0 497{
c9e265e0 498 startup_pirq(data);
d46a78b0
JF
499}
500
c9e265e0 501static void disable_pirq(struct irq_data *data)
d46a78b0 502{
7e186bdd 503 disable_dynirq(data);
d46a78b0
JF
504}
505
68c2c39a 506int xen_irq_from_gsi(unsigned gsi)
d46a78b0 507{
6cb6537d 508 struct irq_info *info;
d46a78b0 509
6cb6537d
IC
510 list_for_each_entry(info, &xen_irq_list_head, list) {
511 if (info->type != IRQT_PIRQ)
d46a78b0
JF
512 continue;
513
6cb6537d
IC
514 if (info->u.pirq.gsi == gsi)
515 return info->irq;
d46a78b0
JF
516 }
517
518 return -1;
519}
68c2c39a 520EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
d46a78b0 521
653378ac
IC
522/*
523 * Do not make any assumptions regarding the relationship between the
524 * IRQ number returned here and the Xen pirq argument.
7a043f11
SS
525 *
526 * Note: We don't assign an event channel until the irq actually started
527 * up. Return an existing irq if we've already got one for the gsi.
e5ac0bda
SS
528 *
529 * Shareable implies level triggered, not shareable implies edge
530 * triggered here.
d46a78b0 531 */
f4d0635b
IC
532int xen_bind_pirq_gsi_to_irq(unsigned gsi,
533 unsigned pirq, int shareable, char *name)
d46a78b0 534{
a0e18116 535 int irq = -1;
d46a78b0
JF
536 struct physdev_irq irq_op;
537
77365948 538 mutex_lock(&irq_mapping_update_lock);
d46a78b0 539
68c2c39a 540 irq = xen_irq_from_gsi(gsi);
d46a78b0 541 if (irq != -1) {
283c0972
JP
542 pr_info("%s: returning irq %d for gsi %u\n",
543 __func__, irq, gsi);
420eb554 544 goto out;
d46a78b0
JF
545 }
546
c9df1ce5 547 irq = xen_allocate_irq_gsi(gsi);
7bee9768
IC
548 if (irq < 0)
549 goto out;
d46a78b0 550
d46a78b0 551 irq_op.irq = irq;
b5401a96
AN
552 irq_op.vector = 0;
553
554 /* Only the privileged domain can do this. For non-priv, the pcifront
555 * driver provides a PCI bus that does the call to do exactly
556 * this in the priv domain. */
557 if (xen_initial_domain() &&
558 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
c9df1ce5 559 xen_free_irq(irq);
d46a78b0
JF
560 irq = -ENOSPC;
561 goto out;
562 }
563
dec02dea 564 xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
9158c358 565 shareable ? PIRQ_SHAREABLE : 0);
d46a78b0 566
7e186bdd
SS
567 pirq_query_unmask(irq);
568 /* We try to use the handler with the appropriate semantic for the
e5ac0bda
SS
569 * type of interrupt: if the interrupt is an edge triggered
570 * interrupt we use handle_edge_irq.
7e186bdd 571 *
e5ac0bda
SS
572 * On the other hand if the interrupt is level triggered we use
573 * handle_fasteoi_irq like the native code does for this kind of
7e186bdd 574 * interrupts.
e5ac0bda 575 *
7e186bdd
SS
576 * Depending on the Xen version, pirq_needs_eoi might return true
577 * not only for level triggered interrupts but for edge triggered
578 * interrupts too. In any case Xen always honors the eoi mechanism,
579 * not injecting any more pirqs of the same kind if the first one
580 * hasn't received an eoi yet. Therefore using the fasteoi handler
581 * is the right choice either way.
582 */
e5ac0bda 583 if (shareable)
7e186bdd
SS
584 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
585 handle_fasteoi_irq, name);
586 else
587 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
588 handle_edge_irq, name);
589
d46a78b0 590out:
77365948 591 mutex_unlock(&irq_mapping_update_lock);
d46a78b0
JF
592
593 return irq;
594}
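
/*
 * Illustrative only (gsi, pirq and shareable are hypothetical here): the
 * architecture's Xen PCI/ACPI setup code is the typical caller, e.g.
 *
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, "ioapic-level");
 *
 * with shareable selecting the level (fasteoi) flow described above.
 */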
595
f731e3ef 596#ifdef CONFIG_PCI_MSI
bf480d95 597int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
cbf6aa89 598{
5cad61a6 599 int rc;
cbf6aa89 600 struct physdev_get_free_pirq op_get_free_pirq;
cbf6aa89 601
bf480d95 602 op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
cbf6aa89 603 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
cbf6aa89 604
5cad61a6
IC
605 WARN_ONCE(rc == -ENOSYS,
606 "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
607
608 return rc ? -1 : op_get_free_pirq.pirq;
cbf6aa89
IC
609}
610
bf480d95 611int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
dec02dea 612 int pirq, const char *name, domid_t domid)
809f9267 613{
bf480d95 614 int irq, ret;
4b41df7f 615
77365948 616 mutex_lock(&irq_mapping_update_lock);
809f9267 617
4b41df7f 618 irq = xen_allocate_irq_dynamic();
e6599225 619 if (irq < 0)
bb5d079a 620 goto out;
809f9267 621
7e186bdd
SS
622 irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
623 name);
809f9267 624
dec02dea 625 xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
5f6fb454 626 ret = irq_set_msi_desc(irq, msidesc);
bf480d95
IC
627 if (ret < 0)
628 goto error_irq;
809f9267 629out:
77365948 630 mutex_unlock(&irq_mapping_update_lock);
4b41df7f 631 return irq;
bf480d95 632error_irq:
77365948 633 mutex_unlock(&irq_mapping_update_lock);
bf480d95 634 xen_free_irq(irq);
e6599225 635 return ret;
809f9267 636}
f731e3ef
QH
637#endif
638
b5401a96
AN
639int xen_destroy_irq(int irq)
640{
641 struct irq_desc *desc;
38aa66fc
JF
642 struct physdev_unmap_pirq unmap_irq;
643 struct irq_info *info = info_for_irq(irq);
b5401a96
AN
644 int rc = -ENOENT;
645
77365948 646 mutex_lock(&irq_mapping_update_lock);
b5401a96
AN
647
648 desc = irq_to_desc(irq);
649 if (!desc)
650 goto out;
651
38aa66fc 652 if (xen_initial_domain()) {
12334715 653 unmap_irq.pirq = info->u.pirq.pirq;
beafbdc1 654 unmap_irq.domid = info->u.pirq.domid;
38aa66fc 655 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
1eff1ad0
KRW
656 /* If another domain quits without making the pci_disable_msix
657 * call, the Xen hypervisor takes care of freeing the PIRQs
658 * (free_domain_pirqs).
659 */
660 if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
283c0972 661 pr_info("domain %d does not have %d anymore\n",
1eff1ad0
KRW
662 info->u.pirq.domid, info->u.pirq.pirq);
663 else if (rc) {
283c0972 664 pr_warn("unmap irq failed %d\n", rc);
38aa66fc
JF
665 goto out;
666 }
667 }
b5401a96 668
c9df1ce5 669 xen_free_irq(irq);
b5401a96
AN
670
671out:
77365948 672 mutex_unlock(&irq_mapping_update_lock);
b5401a96
AN
673 return rc;
674}
675
af42b8d1 676int xen_irq_from_pirq(unsigned pirq)
d46a78b0 677{
69c358ce 678 int irq;
d46a78b0 679
69c358ce 680 struct irq_info *info;
e46cdb66 681
77365948 682 mutex_lock(&irq_mapping_update_lock);
69c358ce
IC
683
684 list_for_each_entry(info, &xen_irq_list_head, list) {
9bb9efe4 685 if (info->type != IRQT_PIRQ)
69c358ce
IC
686 continue;
687 irq = info->irq;
688 if (info->u.pirq.pirq == pirq)
689 goto out;
690 }
691 irq = -1;
692out:
77365948 693 mutex_unlock(&irq_mapping_update_lock);
69c358ce
IC
694
695 return irq;
af42b8d1
SS
696}
697
e6197acc
KRW
698
699int xen_pirq_from_irq(unsigned irq)
700{
701 return pirq_from_irq(irq);
702}
703EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
b536b4b9 704int bind_evtchn_to_irq(unsigned int evtchn)
e46cdb66
JF
705{
706 int irq;
707
77365948 708 mutex_lock(&irq_mapping_update_lock);
e46cdb66
JF
709
710 irq = evtchn_to_irq[evtchn];
711
712 if (irq == -1) {
c9df1ce5 713 irq = xen_allocate_irq_dynamic();
68ba45ff 714 if (irq < 0)
7bee9768 715 goto out;
e46cdb66 716
c442b806 717 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
7e186bdd 718 handle_edge_irq, "event");
e46cdb66 719
9158c358 720 xen_irq_info_evtchn_init(irq, evtchn);
5e152e6c
KRW
721 } else {
722 struct irq_info *info = info_for_irq(irq);
723 WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
e46cdb66
JF
724 }
725
7bee9768 726out:
77365948 727 mutex_unlock(&irq_mapping_update_lock);
e46cdb66
JF
728
729 return irq;
730}
b536b4b9 731EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
e46cdb66 732
f87e4cac
JF
733static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
734{
735 struct evtchn_bind_ipi bind_ipi;
736 int evtchn, irq;
737
77365948 738 mutex_lock(&irq_mapping_update_lock);
f87e4cac
JF
739
740 irq = per_cpu(ipi_to_irq, cpu)[ipi];
90af9514 741
f87e4cac 742 if (irq == -1) {
c9df1ce5 743 irq = xen_allocate_irq_dynamic();
f87e4cac
JF
744 if (irq < 0)
745 goto out;
746
c442b806 747 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
aaca4964 748 handle_percpu_irq, "ipi");
f87e4cac
JF
749
750 bind_ipi.vcpu = cpu;
751 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
752 &bind_ipi) != 0)
753 BUG();
754 evtchn = bind_ipi.port;
755
3d4cfa37 756 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
f87e4cac
JF
757
758 bind_evtchn_to_cpu(evtchn, cpu);
5e152e6c
KRW
759 } else {
760 struct irq_info *info = info_for_irq(irq);
761 WARN_ON(info == NULL || info->type != IRQT_IPI);
f87e4cac
JF
762 }
763
f87e4cac 764 out:
77365948 765 mutex_unlock(&irq_mapping_update_lock);
f87e4cac
JF
766 return irq;
767}
768
2e820f58
IC
769static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
770 unsigned int remote_port)
771{
772 struct evtchn_bind_interdomain bind_interdomain;
773 int err;
774
775 bind_interdomain.remote_dom = remote_domain;
776 bind_interdomain.remote_port = remote_port;
777
778 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
779 &bind_interdomain);
780
781 return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
782}
783
62cc5fc7
OH
784static int find_virq(unsigned int virq, unsigned int cpu)
785{
786 struct evtchn_status status;
787 int port, rc = -ENOENT;
788
789 memset(&status, 0, sizeof(status));
790	for (port = 0; port < NR_EVENT_CHANNELS; port++) {
791 status.dom = DOMID_SELF;
792 status.port = port;
793 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
794 if (rc < 0)
795 continue;
796 if (status.status != EVTCHNSTAT_virq)
797 continue;
798 if (status.u.virq == virq && status.vcpu == cpu) {
799 rc = port;
800 break;
801 }
802 }
803 return rc;
804}
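
/*
 * find_virq() backs the -EEXIST case in bind_virq_to_irq() below: when the
 * hypervisor reports the VIRQ as already bound (for instance left over from
 * a previous kernel), the existing port is located by scanning the event
 * channel status instead of failing.
 */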
f87e4cac 805
4fe7d5a7 806int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
e46cdb66
JF
807{
808 struct evtchn_bind_virq bind_virq;
62cc5fc7 809 int evtchn, irq, ret;
e46cdb66 810
77365948 811 mutex_lock(&irq_mapping_update_lock);
e46cdb66
JF
812
813 irq = per_cpu(virq_to_irq, cpu)[virq];
814
815 if (irq == -1) {
c9df1ce5 816 irq = xen_allocate_irq_dynamic();
68ba45ff 817 if (irq < 0)
7bee9768 818 goto out;
a52521f1 819
c442b806 820 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
a52521f1
JF
821 handle_percpu_irq, "virq");
822
e46cdb66
JF
823 bind_virq.virq = virq;
824 bind_virq.vcpu = cpu;
62cc5fc7
OH
825 ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
826 &bind_virq);
827 if (ret == 0)
828 evtchn = bind_virq.port;
829 else {
830 if (ret == -EEXIST)
831 ret = find_virq(virq, cpu);
832 BUG_ON(ret < 0);
833 evtchn = ret;
834 }
e46cdb66 835
3d4cfa37 836 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
e46cdb66
JF
837
838 bind_evtchn_to_cpu(evtchn, cpu);
5e152e6c
KRW
839 } else {
840 struct irq_info *info = info_for_irq(irq);
841 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
e46cdb66
JF
842 }
843
7bee9768 844out:
77365948 845 mutex_unlock(&irq_mapping_update_lock);
e46cdb66
JF
846
847 return irq;
848}
849
850static void unbind_from_irq(unsigned int irq)
851{
852 struct evtchn_close close;
853 int evtchn = evtchn_from_irq(irq);
420eb554 854 struct irq_info *info = irq_get_handler_data(irq);
e46cdb66 855
94032c50
KRW
856 if (WARN_ON(!info))
857 return;
858
77365948 859 mutex_lock(&irq_mapping_update_lock);
e46cdb66 860
420eb554
DDG
861 if (info->refcnt > 0) {
862 info->refcnt--;
863 if (info->refcnt != 0)
864 goto done;
865 }
866
d77bbd4d 867 if (VALID_EVTCHN(evtchn)) {
e46cdb66
JF
868 close.port = evtchn;
869 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
870 BUG();
871
872 switch (type_from_irq(irq)) {
873 case IRQT_VIRQ:
874 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
ced40d0f 875 [virq_from_irq(irq)] = -1;
e46cdb66 876 break;
d68d82af
AN
877 case IRQT_IPI:
878 per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
ced40d0f 879 [ipi_from_irq(irq)] = -1;
d68d82af 880 break;
e46cdb66
JF
881 default:
882 break;
883 }
884
885 /* Closed ports are implicitly re-bound to VCPU0. */
886 bind_evtchn_to_cpu(evtchn, 0);
887
888 evtchn_to_irq[evtchn] = -1;
fed5ea87
IC
889 }
890
ca62ce8c 891 BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
e46cdb66 892
9158c358 893 xen_free_irq(irq);
e46cdb66 894
420eb554 895 done:
77365948 896 mutex_unlock(&irq_mapping_update_lock);
e46cdb66
JF
897}
898
899int bind_evtchn_to_irqhandler(unsigned int evtchn,
7c239975 900 irq_handler_t handler,
e46cdb66
JF
901 unsigned long irqflags,
902 const char *devname, void *dev_id)
903{
361ae8cb 904 int irq, retval;
e46cdb66
JF
905
906 irq = bind_evtchn_to_irq(evtchn);
7bee9768
IC
907 if (irq < 0)
908 return irq;
e46cdb66
JF
909 retval = request_irq(irq, handler, irqflags, devname, dev_id);
910 if (retval != 0) {
911 unbind_from_irq(irq);
912 return retval;
913 }
914
915 return irq;
916}
917EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
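
/*
 * Typical usage (illustrative; "evtchn" would usually come from xenbus and
 * "my_handler"/"my_dev" are hypothetical):
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0, "my-dev", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */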
918
2e820f58
IC
919int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
920 unsigned int remote_port,
921 irq_handler_t handler,
922 unsigned long irqflags,
923 const char *devname,
924 void *dev_id)
925{
926 int irq, retval;
927
928 irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
929 if (irq < 0)
930 return irq;
931
932 retval = request_irq(irq, handler, irqflags, devname, dev_id);
933 if (retval != 0) {
934 unbind_from_irq(irq);
935 return retval;
936 }
937
938 return irq;
939}
940EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
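
/*
 * Illustrative only: a backend driver typically passes the frontend's domid
 * and the event channel number it read from xenstore (all names here are
 * hypothetical):
 *
 *	err = bind_interdomain_evtchn_to_irqhandler(fe_domid, fe_evtchn,
 *						    be_handler, 0,
 *						    "my-backend", my_be);
 *
 * A negative return value is an error, otherwise it is the bound irq.
 */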
941
e46cdb66 942int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
7c239975 943 irq_handler_t handler,
e46cdb66
JF
944 unsigned long irqflags, const char *devname, void *dev_id)
945{
361ae8cb 946 int irq, retval;
e46cdb66
JF
947
948 irq = bind_virq_to_irq(virq, cpu);
7bee9768
IC
949 if (irq < 0)
950 return irq;
e46cdb66
JF
951 retval = request_irq(irq, handler, irqflags, devname, dev_id);
952 if (retval != 0) {
953 unbind_from_irq(irq);
954 return retval;
955 }
956
957 return irq;
958}
959EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
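
/*
 * Illustrative only (handler name hypothetical): per-cpu virtual interrupts
 * such as the timer VIRQ are bound once per cpu, e.g.
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_timer_interrupt,
 *				      IRQF_PERCPU, "timer", NULL);
 */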
960
f87e4cac
JF
961int bind_ipi_to_irqhandler(enum ipi_vector ipi,
962 unsigned int cpu,
963 irq_handler_t handler,
964 unsigned long irqflags,
965 const char *devname,
966 void *dev_id)
967{
968 int irq, retval;
969
970 irq = bind_ipi_to_irq(ipi, cpu);
971 if (irq < 0)
972 return irq;
973
9bab0b7f 974 irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
f87e4cac
JF
975 retval = request_irq(irq, handler, irqflags, devname, dev_id);
976 if (retval != 0) {
977 unbind_from_irq(irq);
978 return retval;
979 }
980
981 return irq;
982}
983
e46cdb66
JF
984void unbind_from_irqhandler(unsigned int irq, void *dev_id)
985{
94032c50
KRW
986 struct irq_info *info = irq_get_handler_data(irq);
987
988 if (WARN_ON(!info))
989 return;
e46cdb66
JF
990 free_irq(irq, dev_id);
991 unbind_from_irq(irq);
992}
993EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
994
420eb554
DDG
995int evtchn_make_refcounted(unsigned int evtchn)
996{
997 int irq = evtchn_to_irq[evtchn];
998 struct irq_info *info;
999
1000 if (irq == -1)
1001 return -ENOENT;
1002
1003 info = irq_get_handler_data(irq);
1004
1005 if (!info)
1006 return -ENOENT;
1007
1008 WARN_ON(info->refcnt != -1);
1009
1010 info->refcnt = 1;
1011
1012 return 0;
1013}
1014EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
1015
1016int evtchn_get(unsigned int evtchn)
1017{
1018 int irq;
1019 struct irq_info *info;
1020 int err = -ENOENT;
1021
c3b3f16d
DDG
1022 if (evtchn >= NR_EVENT_CHANNELS)
1023 return -EINVAL;
1024
420eb554
DDG
1025 mutex_lock(&irq_mapping_update_lock);
1026
1027 irq = evtchn_to_irq[evtchn];
1028 if (irq == -1)
1029 goto done;
1030
1031 info = irq_get_handler_data(irq);
1032
1033 if (!info)
1034 goto done;
1035
1036 err = -EINVAL;
1037 if (info->refcnt <= 0)
1038 goto done;
1039
1040 info->refcnt++;
1041 err = 0;
1042 done:
1043 mutex_unlock(&irq_mapping_update_lock);
1044
1045 return err;
1046}
1047EXPORT_SYMBOL_GPL(evtchn_get);
1048
1049void evtchn_put(unsigned int evtchn)
1050{
1051 int irq = evtchn_to_irq[evtchn];
1052 if (WARN_ON(irq == -1))
1053 return;
1054 unbind_from_irq(irq);
1055}
1056EXPORT_SYMBOL_GPL(evtchn_put);
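
/*
 * Refcounting summary: refcnt is -1 for ordinary kernel-bound channels;
 * evtchn_make_refcounted() switches a channel to counted mode (refcnt = 1)
 * so that other users (e.g. the evtchn device) can pin it with evtchn_get()
 * and release it with evtchn_put(), the final put unbinding the irq through
 * unbind_from_irq().
 */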
1057
f87e4cac
JF
1058void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1059{
6efa20e4
KRW
1060 int irq;
1061
072b2064 1062#ifdef CONFIG_X86
6efa20e4
KRW
1063 if (unlikely(vector == XEN_NMI_VECTOR)) {
1064 int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
1065 if (rc < 0)
1066 printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
1067 return;
1068 }
072b2064 1069#endif
6efa20e4 1070 irq = per_cpu(ipi_to_irq, cpu)[vector];
f87e4cac
JF
1071 BUG_ON(irq < 0);
1072 notify_remote_via_irq(irq);
1073}
1074
245b2e70
TH
1075static DEFINE_PER_CPU(unsigned, xed_nesting_count);
1076
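/*
 * xed_nesting_count guards against re-entering the event processing loop:
 * if an upcall arrives while __xen_evtchn_do_upcall() is already running on
 * this cpu, the nested call only bumps the count and returns, and the outer
 * loop keeps re-scanning until evtchn_upcall_pending stays clear.
 */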
38e20b07 1077static void __xen_evtchn_do_upcall(void)
e46cdb66 1078{
780f36d8 1079 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
9a489f45 1080 int cpu = get_cpu();
088c05a8 1081 unsigned count;
e46cdb66 1082
229664be 1083 do {
229664be 1084 vcpu_info->evtchn_upcall_pending = 0;
e46cdb66 1085
b2e4ae69 1086 if (__this_cpu_inc_return(xed_nesting_count) - 1)
229664be 1087 goto out;
e46cdb66 1088
9a489f45 1089 xen_evtchn_handle_events(cpu);
e46cdb66 1090
229664be
JF
1091 BUG_ON(!irqs_disabled());
1092
780f36d8
CL
1093 count = __this_cpu_read(xed_nesting_count);
1094 __this_cpu_write(xed_nesting_count, 0);
183d03cc 1095 } while (count != 1 || vcpu_info->evtchn_upcall_pending);
229664be
JF
1096
1097out:
38e20b07
SY
1098
1099 put_cpu();
1100}
1101
1102void xen_evtchn_do_upcall(struct pt_regs *regs)
1103{
1104 struct pt_regs *old_regs = set_irq_regs(regs);
1105
772aebce 1106 irq_enter();
0ec53ecf 1107#ifdef CONFIG_X86
38e20b07 1108 exit_idle();
0ec53ecf 1109#endif
38e20b07
SY
1110
1111 __xen_evtchn_do_upcall();
1112
3445a8fd
JF
1113 irq_exit();
1114 set_irq_regs(old_regs);
38e20b07 1115}
3445a8fd 1116
38e20b07
SY
1117void xen_hvm_evtchn_do_upcall(void)
1118{
1119 __xen_evtchn_do_upcall();
e46cdb66 1120}
183d03cc 1121EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
e46cdb66 1122
eb1e305f
JF
1123/* Rebind a new event channel to an existing irq. */
1124void rebind_evtchn_irq(int evtchn, int irq)
1125{
d77bbd4d
JF
1126 struct irq_info *info = info_for_irq(irq);
1127
94032c50
KRW
1128 if (WARN_ON(!info))
1129 return;
1130
eb1e305f
JF
1131 /* Make sure the irq is masked, since the new event channel
1132 will also be masked. */
1133 disable_irq(irq);
1134
77365948 1135 mutex_lock(&irq_mapping_update_lock);
eb1e305f
JF
1136
1137 /* After resume the irq<->evtchn mappings are all cleared out */
1138 BUG_ON(evtchn_to_irq[evtchn] != -1);
1139 /* Expect irq to have been bound before,
d77bbd4d
JF
1140 so there should be a proper type */
1141 BUG_ON(info->type == IRQT_UNBOUND);
eb1e305f 1142
9158c358 1143 xen_irq_info_evtchn_init(irq, evtchn);
eb1e305f 1144
77365948 1145 mutex_unlock(&irq_mapping_update_lock);
eb1e305f
JF
1146
1147 /* new event channels are always bound to cpu 0 */
0de26520 1148 irq_set_affinity(irq, cpumask_of(0));
eb1e305f
JF
1149
1150 /* Unmask the event channel. */
1151 enable_irq(irq);
1152}
1153
e46cdb66 1154/* Rebind an evtchn so that it gets delivered to a specific cpu */
d5dedd45 1155static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
e46cdb66
JF
1156{
1157 struct evtchn_bind_vcpu bind_vcpu;
1158 int evtchn = evtchn_from_irq(irq);
4704fe4f 1159 int masked;
e46cdb66 1160
be49472f
IC
1161 if (!VALID_EVTCHN(evtchn))
1162 return -1;
1163
1164 /*
1165 * Events delivered via platform PCI interrupts are always
1166 * routed to vcpu 0 and hence cannot be rebound.
1167 */
1168 if (xen_hvm_domain() && !xen_have_vector_callback)
d5dedd45 1169 return -1;
e46cdb66
JF
1170
1171 /* Send future instances of this interrupt to other vcpu. */
1172 bind_vcpu.port = evtchn;
1173 bind_vcpu.vcpu = tcpu;
1174
4704fe4f
DV
1175 /*
1176 * Mask the event while changing the VCPU binding to prevent
1177 * it being delivered on an unexpected VCPU.
1178 */
3f70fa82 1179 masked = test_and_set_mask(evtchn);
4704fe4f 1180
e46cdb66
JF
1181 /*
1182 * If this fails, it usually just indicates that we're dealing with a
1183 * virq or IPI channel, which don't actually need to be rebound. Ignore
1184 * it, but don't do the xenlinux-level rebind in that case.
1185 */
1186 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1187 bind_evtchn_to_cpu(evtchn, tcpu);
e46cdb66 1188
4704fe4f
DV
1189 if (!masked)
1190 unmask_evtchn(evtchn);
1191
d5dedd45
YL
1192 return 0;
1193}
e46cdb66 1194
c9e265e0
TG
1195static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1196 bool force)
e46cdb66 1197{
0de26520 1198 unsigned tcpu = cpumask_first(dest);
d5dedd45 1199
c9e265e0 1200 return rebind_irq_to_cpu(data->irq, tcpu);
e46cdb66
JF
1201}
1202
87295185 1203static int retrigger_evtchn(int evtchn)
642e0c88 1204{
87295185 1205 int masked;
642e0c88
IY
1206
1207 if (!VALID_EVTCHN(evtchn))
87295185 1208 return 0;
642e0c88 1209
3f70fa82 1210 masked = test_and_set_mask(evtchn);
76ec8d64 1211 set_evtchn(evtchn);
642e0c88
IY
1212 if (!masked)
1213 unmask_evtchn(evtchn);
1214
1215 return 1;
1216}
1217
87295185
DV
1218int resend_irq_on_evtchn(unsigned int irq)
1219{
1220 return retrigger_evtchn(evtchn_from_irq(irq));
1221}
1222
c9e265e0 1223static void enable_dynirq(struct irq_data *data)
e46cdb66 1224{
c9e265e0 1225 int evtchn = evtchn_from_irq(data->irq);
e46cdb66
JF
1226
1227 if (VALID_EVTCHN(evtchn))
1228 unmask_evtchn(evtchn);
1229}
1230
c9e265e0 1231static void disable_dynirq(struct irq_data *data)
e46cdb66 1232{
c9e265e0 1233 int evtchn = evtchn_from_irq(data->irq);
e46cdb66
JF
1234
1235 if (VALID_EVTCHN(evtchn))
1236 mask_evtchn(evtchn);
1237}
1238
c9e265e0 1239static void ack_dynirq(struct irq_data *data)
e46cdb66 1240{
c9e265e0 1241 int evtchn = evtchn_from_irq(data->irq);
e46cdb66 1242
7e186bdd 1243 irq_move_irq(data);
e46cdb66
JF
1244
1245 if (VALID_EVTCHN(evtchn))
7e186bdd
SS
1246 clear_evtchn(evtchn);
1247}
1248
1249static void mask_ack_dynirq(struct irq_data *data)
1250{
1251 disable_dynirq(data);
1252 ack_dynirq(data);
e46cdb66
JF
1253}
1254
c9e265e0 1255static int retrigger_dynirq(struct irq_data *data)
e46cdb66 1256{
87295185 1257 return retrigger_evtchn(evtchn_from_irq(data->irq));
e46cdb66
JF
1258}
1259
0a85226f 1260static void restore_pirqs(void)
9a069c33
SS
1261{
1262 int pirq, rc, irq, gsi;
1263 struct physdev_map_pirq map_irq;
69c358ce 1264 struct irq_info *info;
9a069c33 1265
69c358ce
IC
1266 list_for_each_entry(info, &xen_irq_list_head, list) {
1267 if (info->type != IRQT_PIRQ)
9a069c33
SS
1268 continue;
1269
69c358ce
IC
1270 pirq = info->u.pirq.pirq;
1271 gsi = info->u.pirq.gsi;
1272 irq = info->irq;
1273
9a069c33
SS
1274 /* save/restore of PT devices doesn't work, so at this point the
1275 * only devices present are GSI based emulated devices */
9a069c33
SS
1276 if (!gsi)
1277 continue;
1278
1279 map_irq.domid = DOMID_SELF;
1280 map_irq.type = MAP_PIRQ_TYPE_GSI;
1281 map_irq.index = gsi;
1282 map_irq.pirq = pirq;
1283
1284 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1285 if (rc) {
283c0972
JP
1286 pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1287 gsi, irq, pirq, rc);
9158c358 1288 xen_free_irq(irq);
9a069c33
SS
1289 continue;
1290 }
1291
1292 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1293
c9e265e0 1294 __startup_pirq(irq);
9a069c33
SS
1295 }
1296}
1297
0e91398f
JF
1298static void restore_cpu_virqs(unsigned int cpu)
1299{
1300 struct evtchn_bind_virq bind_virq;
1301 int virq, irq, evtchn;
1302
1303 for (virq = 0; virq < NR_VIRQS; virq++) {
1304 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1305 continue;
1306
ced40d0f 1307 BUG_ON(virq_from_irq(irq) != virq);
0e91398f
JF
1308
1309 /* Get a new binding from Xen. */
1310 bind_virq.virq = virq;
1311 bind_virq.vcpu = cpu;
1312 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1313 &bind_virq) != 0)
1314 BUG();
1315 evtchn = bind_virq.port;
1316
1317 /* Record the new mapping. */
3d4cfa37 1318 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
0e91398f 1319 bind_evtchn_to_cpu(evtchn, cpu);
0e91398f
JF
1320 }
1321}
1322
1323static void restore_cpu_ipis(unsigned int cpu)
1324{
1325 struct evtchn_bind_ipi bind_ipi;
1326 int ipi, irq, evtchn;
1327
1328 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1329 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1330 continue;
1331
ced40d0f 1332 BUG_ON(ipi_from_irq(irq) != ipi);
0e91398f
JF
1333
1334 /* Get a new binding from Xen. */
1335 bind_ipi.vcpu = cpu;
1336 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1337 &bind_ipi) != 0)
1338 BUG();
1339 evtchn = bind_ipi.port;
1340
1341 /* Record the new mapping. */
3d4cfa37 1342 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
0e91398f 1343 bind_evtchn_to_cpu(evtchn, cpu);
0e91398f
JF
1344 }
1345}
1346
2d9e1e2f
JF
1347/* Clear an irq's pending state, in preparation for polling on it */
1348void xen_clear_irq_pending(int irq)
1349{
1350 int evtchn = evtchn_from_irq(irq);
1351
1352 if (VALID_EVTCHN(evtchn))
1353 clear_evtchn(evtchn);
1354}
d9a8814f 1355EXPORT_SYMBOL(xen_clear_irq_pending);
168d2f46
JF
1356void xen_set_irq_pending(int irq)
1357{
1358 int evtchn = evtchn_from_irq(irq);
1359
1360 if (VALID_EVTCHN(evtchn))
1361 set_evtchn(evtchn);
1362}
1363
1364bool xen_test_irq_pending(int irq)
1365{
1366 int evtchn = evtchn_from_irq(irq);
1367 bool ret = false;
1368
1369 if (VALID_EVTCHN(evtchn))
1370 ret = test_evtchn(evtchn);
1371
1372 return ret;
1373}
1374
d9a8814f
KRW
1375/* Poll waiting for an irq to become pending with timeout. In the usual case,
1376 * the irq will be disabled so it won't deliver an interrupt. */
1377void xen_poll_irq_timeout(int irq, u64 timeout)
2d9e1e2f
JF
1378{
1379 evtchn_port_t evtchn = evtchn_from_irq(irq);
1380
1381 if (VALID_EVTCHN(evtchn)) {
1382 struct sched_poll poll;
1383
1384 poll.nr_ports = 1;
d9a8814f 1385 poll.timeout = timeout;
ff3c5362 1386 set_xen_guest_handle(poll.ports, &evtchn);
2d9e1e2f
JF
1387
1388 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
1389 BUG();
1390 }
1391}
d9a8814f
KRW
1392EXPORT_SYMBOL(xen_poll_irq_timeout);
1393/* Poll waiting for an irq to become pending. In the usual case, the
1394 * irq will be disabled so it won't deliver an interrupt. */
1395void xen_poll_irq(int irq)
1396{
1397 xen_poll_irq_timeout(irq, 0 /* no timeout */);
1398}
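
/*
 * Illustrative usage of the poll helpers above ("condition" is hypothetical);
 * xen_poll_irq() blocks in the hypervisor until the irq becomes pending:
 *
 *	xen_clear_irq_pending(irq);
 *	while (!condition)
 *		xen_poll_irq(irq);
 *
 * This is the pattern used, for example, by the Xen pv spinlock slow path.
 */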
2d9e1e2f 1399
c7c2c3a2
KRW
1400/* Check whether the IRQ line is shared with other guests. */
1401int xen_test_irq_shared(int irq)
1402{
1403 struct irq_info *info = info_for_irq(irq);
94032c50
KRW
1404 struct physdev_irq_status_query irq_status;
1405
1406 if (WARN_ON(!info))
1407 return -ENOENT;
1408
1409 irq_status.irq = info->u.pirq.pirq;
c7c2c3a2
KRW
1410
1411 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
1412 return 0;
1413 return !(irq_status.flags & XENIRQSTAT_shared);
1414}
1415EXPORT_SYMBOL_GPL(xen_test_irq_shared);
1416
0e91398f
JF
1417void xen_irq_resume(void)
1418{
6cb6537d
IC
1419 unsigned int cpu, evtchn;
1420 struct irq_info *info;
0e91398f 1421
0e91398f
JF
1422 /* New event-channel space is not 'live' yet. */
1423 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1424 mask_evtchn(evtchn);
1425
1426 /* No IRQ <-> event-channel mappings. */
6cb6537d
IC
1427 list_for_each_entry(info, &xen_irq_list_head, list)
1428 info->evtchn = 0; /* zap event-channel binding */
0e91398f
JF
1429
1430 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1431 evtchn_to_irq[evtchn] = -1;
1432
1433 for_each_possible_cpu(cpu) {
1434 restore_cpu_virqs(cpu);
1435 restore_cpu_ipis(cpu);
1436 }
6903591f 1437
0a85226f 1438 restore_pirqs();
0e91398f
JF
1439}
1440
e46cdb66 1441static struct irq_chip xen_dynamic_chip __read_mostly = {
c9e265e0 1442 .name = "xen-dyn",
54a353a0 1443
c9e265e0
TG
1444 .irq_disable = disable_dynirq,
1445 .irq_mask = disable_dynirq,
1446 .irq_unmask = enable_dynirq,
54a353a0 1447
7e186bdd
SS
1448 .irq_ack = ack_dynirq,
1449 .irq_mask_ack = mask_ack_dynirq,
1450
c9e265e0
TG
1451 .irq_set_affinity = set_affinity_irq,
1452 .irq_retrigger = retrigger_dynirq,
e46cdb66
JF
1453};
1454
d46a78b0 1455static struct irq_chip xen_pirq_chip __read_mostly = {
c9e265e0 1456 .name = "xen-pirq",
d46a78b0 1457
c9e265e0
TG
1458 .irq_startup = startup_pirq,
1459 .irq_shutdown = shutdown_pirq,
c9e265e0 1460 .irq_enable = enable_pirq,
c9e265e0 1461 .irq_disable = disable_pirq,
d46a78b0 1462
7e186bdd
SS
1463 .irq_mask = disable_dynirq,
1464 .irq_unmask = enable_dynirq,
1465
1466 .irq_ack = eoi_pirq,
1467 .irq_eoi = eoi_pirq,
1468 .irq_mask_ack = mask_ack_pirq,
d46a78b0 1469
c9e265e0 1470 .irq_set_affinity = set_affinity_irq,
d46a78b0 1471
c9e265e0 1472 .irq_retrigger = retrigger_dynirq,
d46a78b0
JF
1473};
1474
aaca4964 1475static struct irq_chip xen_percpu_chip __read_mostly = {
c9e265e0 1476 .name = "xen-percpu",
aaca4964 1477
c9e265e0
TG
1478 .irq_disable = disable_dynirq,
1479 .irq_mask = disable_dynirq,
1480 .irq_unmask = enable_dynirq,
aaca4964 1481
c9e265e0 1482 .irq_ack = ack_dynirq,
aaca4964
JF
1483};
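
/*
 * Summary of the three irq_chips above: xen_dynamic_chip backs ordinary
 * event channels (handle_edge_irq), xen_pirq_chip backs physical interrupts
 * routed through Xen (edge or fasteoi depending on the shareable flag), and
 * xen_percpu_chip backs VIRQs and IPIs (handle_percpu_irq).
 */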
1484
38e20b07
SY
1485int xen_set_callback_via(uint64_t via)
1486{
1487 struct xen_hvm_param a;
1488 a.domid = DOMID_SELF;
1489 a.index = HVM_PARAM_CALLBACK_IRQ;
1490 a.value = via;
1491 return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
1492}
1493EXPORT_SYMBOL_GPL(xen_set_callback_via);
1494
ca65f9fc 1495#ifdef CONFIG_XEN_PVHVM
38e20b07
SY
1496/* Vector callbacks are better than PCI interrupts to receive event
1497 * channel notifications because we can receive vector callbacks on any
1498 * vcpu and we don't need PCI support or APIC interactions. */
1499void xen_callback_vector(void)
1500{
1501 int rc;
1502 uint64_t callback_via;
1503 if (xen_have_vector_callback) {
bc2b0331 1504 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
38e20b07
SY
1505 rc = xen_set_callback_via(callback_via);
1506 if (rc) {
283c0972 1507 pr_err("Request for Xen HVM callback vector failed\n");
38e20b07
SY
1508 xen_have_vector_callback = 0;
1509 return;
1510 }
283c0972 1511 pr_info("Xen HVM callback vector for event delivery is enabled\n");
38e20b07 1512 /* in the restore case the vector has already been allocated */
bc2b0331
S
1513 if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
1514 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
1515 xen_hvm_callback_vector);
38e20b07
SY
1516 }
1517}
ca65f9fc
SS
1518#else
1519void xen_callback_vector(void) {}
1520#endif
38e20b07 1521
2e3d8860 1522void __init xen_init_IRQ(void)
e46cdb66 1523{
0ec53ecf 1524 int i;
c7a3589e 1525
b21ddbf5
JF
1526 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
1527 GFP_KERNEL);
9d093e29 1528 BUG_ON(!evtchn_to_irq);
b21ddbf5
JF
1529 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1530 evtchn_to_irq[i] = -1;
e46cdb66 1531
e46cdb66
JF
1532 /* No event channels are 'live' right now. */
1533 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1534 mask_evtchn(i);
1535
9846ff10
SS
1536 pirq_needs_eoi = pirq_needs_eoi_flag;
1537
0ec53ecf 1538#ifdef CONFIG_X86
38e20b07
SY
1539 if (xen_hvm_domain()) {
1540 xen_callback_vector();
1541 native_init_IRQ();
3942b740
SS
1542 /* pci_xen_hvm_init must be called after native_init_IRQ so that
1543 * __acpi_register_gsi can point at the right function */
1544 pci_xen_hvm_init();
38e20b07 1545 } else {
0ec53ecf 1546 int rc;
9846ff10
SS
1547 struct physdev_pirq_eoi_gmfn eoi_gmfn;
1548
38e20b07 1549 irq_ctx_init(smp_processor_id());
38aa66fc 1550 if (xen_initial_domain())
a0ee0567 1551 pci_xen_initial_domain();
9846ff10
SS
1552
1553 pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
1554 eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
1555 rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
1556 if (rc != 0) {
1557 free_page((unsigned long) pirq_eoi_map);
1558 pirq_eoi_map = NULL;
1559 } else
1560 pirq_needs_eoi = pirq_check_eoi_map;
38e20b07 1561 }
0ec53ecf 1562#endif
e46cdb66 1563}