/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but nr_irqs is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
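
/*
 * Typical usage from a frontend driver, as an illustrative sketch only
 * (the evtchn number would normally be negotiated with the backend via
 * xenbus; my_handler and my_dev are placeholder names, not symbols
 * defined in this file):
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */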

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN - no extra information; the event channel itself is enough
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)	/* issue a PHYSDEVOP_eoi after handling */
#define PIRQ_SHAREABLE	(1 << 1)	/* bind with BIND_PIRQ__WILL_SHARE */

static struct irq_info *irq_info;
static int *pirq_to_irq;
static int nr_pirqs;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

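/*
 * Return the events in word 'idx' of the shared pending bitmap which
 * are both bound to 'cpu' and not globally masked.
 */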
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/* Set every bit of CPU0's mask; sizeof(cpu_evtchn_mask(0)) would
	 * only be the size of a pointer, not of the whole bitmap. */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}

/* Callers of this function should make sure that PHYSDEVOP_get_nr_pirqs
 * succeeded; otherwise nr_pirqs won't hold the right value. */
static int find_unbound_pirq(void)
{
	int i;
	for (i = nr_pirqs-1; i >= 0; i--) {
		if (pirq_to_irq[i] < 0)
			return i;
	}
	return -1;
}

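/*
 * Allocate a kernel irq for a dynamic event channel. We search from
 * the top of the irq space downwards, so that the low irq numbers stay
 * free for identity-mapped hardware GSIs.
 */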
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it. */
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}

static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}

static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/* xen_map_pirq_gsi might allocate irqs from the top down; as a
 * consequence, don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument, in which case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: we don't assign an event channel until the irq is actually
 * started up. Return an existing irq if we've already got one for the
 * gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq >= nr_pirqs) || (gsi >= nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s out of range!\n",
		       pirq >= nr_pirqs ? "pirq" : "gsi");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed), so
	 * every non-privileged domain (!xen_initial_domain()) takes the
	 * identity-mapped path here. */
	if (identity_mapped_irq(gsi) || !xen_initial_domain()) {
		irq = gsi;
		irq_alloc_desc_at(irq, 0);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		irq_free_desc(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	irq_info[irq] = mk_unbound_info();

	irq_free_desc(irq);
	rc = 0;

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

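/* Bind the IPI vector 'ipi' for 'cpu' to an irq, creating the event
 * channel if no binding exists yet. */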
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

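/* Bind VIRQ 'virq' for 'cpu' to an irq, creating the event channel if
 * no binding exists yet. */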
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending event bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

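	/*
	 * If this function is re-entered on the same cpu while the
	 * handlers run, the nested invocation just bumps the per-cpu
	 * nesting count and returns; the outermost invocation sees the
	 * changed count and rescans, instead of nesting on the stack.
	 */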
	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
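
/*
 * Illustrative sketch only (not an in-tree caller): a paravirtualised
 * spinlock slowpath could park a vcpu on a dedicated per-cpu event
 * channel like this, where poll_irq and lock_available() are
 * hypothetical names supplied by the caller:
 *
 *	xen_clear_irq_pending(poll_irq);
 *	if (!lock_available())
 *		xen_poll_irq(poll_irq);	// blocks until the channel is kicked
 */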

void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

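/*
 * Per-cpu events (VIRQs and IPIs) stay bound to the vcpu they were
 * created on (see rebind_irq_to_cpu() above), so this chip has no
 * set_affinity method.
 */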
static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
			       " failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
		       "enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i, rc;
	struct physdev_nr_pirqs op_nr_pirqs;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs);
	if (rc < 0) {
		nr_pirqs = nr_irqs;
		if (rc != -ENOSYS)
			printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc);
	} else {
		if (xen_pv_domain() && !xen_initial_domain())
			nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs);
		else
			nr_pirqs = op_nr_pirqs.nr_pirqs;
	}
	pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_pirqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}