/*
 * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
 * initial domain support. We also handle the DSDT _PRT callbacks for GSIs
 * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
 * concept of GSIs). Under PV we hook into the pcibios API for IRQs and
 * 0xcf8 PCI configuration read/write.
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 *         Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *         Stefano Stabellini <stefano.stabellini@eu.citrix.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <asm/io_apic.h>
#include <asm/pci_x86.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
	int rc;
	int share = 1;
	int pirq;
	u8 gsi;

	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
			 rc);
		return rc;
	}
	/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line. */
	pirq = gsi;

	if (gsi < NR_IRQS_LEGACY)
		share = 0;

	rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
			 gsi, pirq, rc);
		return rc;
	}

	dev->irq = rc;
	dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
	return 0;
}
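/*
 * Rough sketch of a caller's view (editor's sketch, hypothetical device in a
 * PV DomU using pcifront):
 *
 *	pci_enable_device(pdev);     -> pcibios_enable_device()
 *	                             -> pcibios_enable_irq(pdev)
 *	                             -> xen_pcifront_enable_irq(pdev)
 *	request_irq(pdev->irq, ...); // pdev->irq is the IRQ bound to the PIRQ
 */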
static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
			     bool set_pirq)
{
	int rc, pirq = -1, irq = -1;
	struct physdev_map_pirq map_irq;
	int shareable = 0;
	char *name;

	irq = xen_irq_from_gsi(gsi);
	if (irq > 0)
		return irq;

	if (set_pirq)
		pirq = gsi;

	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_GSI;
	map_irq.index = gsi;
	map_irq.pirq = pirq;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);
		return -1;
	}

	if (triggering == ACPI_EDGE_SENSITIVE) {
		shareable = 0;
		name = "ioapic-edge";
	} else {
		shareable = 1;
		name = "ioapic-level";
	}

	if (gsi_override >= 0)
		gsi = gsi_override;

	irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
	if (irq < 0)
		goto out;

	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
out:
	return irq;
}
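/*
 * In short: xen_register_pirq() asks Xen to map a GSI to a PIRQ
 * (PHYSDEVOP_map_pirq) and then binds that PIRQ to a Linux IRQ through an
 * event channel via xen_bind_pirq_gsi_to_irq(). Level-triggered GSIs are
 * registered as shareable ("ioapic-level"), edge-triggered ones are not.
 */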
static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
				     int trigger, int polarity)
{
	if (!xen_hvm_domain())
		return -1;

	return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
				 false /* no mapping of GSI to PIRQ */);
}
#ifdef CONFIG_XEN_DOM0
static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
{
	int rc, irq;
	struct physdev_setup_gsi setup_gsi;

	if (!xen_pv_domain())
		return -1;

	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
			gsi, triggering, polarity);

	irq = xen_register_pirq(gsi, gsi_override, triggering, true);

	setup_gsi.gsi = gsi;
	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
	setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (rc == -EEXIST)
		printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
	else if (rc) {
		printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
				gsi, rc);
		return rc;
	}

	return irq;
}
static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
				 int trigger, int polarity)
{
	return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
}
#endif
#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>
#include <asm/msidef.h>

struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, ret, i;
	struct msi_desc *msidesc;
	int *v;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
	if (!v)
		return -ENOMEM;

	if (type == PCI_CAP_ID_MSIX)
		ret = xen_pci_frontend_enable_msix(dev, v, nvec);
	else
		ret = xen_pci_frontend_enable_msi(dev, v);
	if (ret)
		goto error;
	i = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "pcifront-msi-x" :
					       "pcifront-msi",
					       DOMID_SELF);
		if (irq < 0) {
			ret = irq;
			goto free;
		}
		i++;
	}
	kfree(v);
	return 0;

error:
	dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
free:
	kfree(v);
	return ret;
}
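/*
 * PV flow in brief: the pcifront driver (through the xen_pci_frontend ops)
 * asks the backend to enable MSI/MSI-X on the real device; the backend hands
 * back one PIRQ per vector in v[], and each PIRQ is then bound to a Linux
 * IRQ with xen_bind_pirq_msi_to_irq().
 */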
#define XEN_PIRQ_MSI_DATA	(MSI_DATA_TRIGGER_EDGE | \
		MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))

static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
				struct msi_msg *msg)
{
	/* We set vector == 0 to tell the hypervisor we don't care about it,
	 * but we want a pirq setup instead.
	 * We use the dest_id field to pass the pirq that we want. */
	msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
	msg->address_lo =
		MSI_ADDR_BASE_LO |
		MSI_ADDR_DEST_MODE_PHYSICAL |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID(pirq);
	msg->data = XEN_PIRQ_MSI_DATA;
}
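/*
 * Example of the encoding (hypothetical pirq = 0x123, assuming the msidef.h
 * macros where MSI_ADDR_EXT_DEST_ID() keeps the bits above the low byte and
 * MSI_ADDR_DEST_ID() places the low byte in the dest-id field):
 *
 *	address_hi = MSI_ADDR_BASE_HI | 0x100
 *	address_lo = MSI_ADDR_BASE_LO | ... | MSI_ADDR_DEST_ID(0x23)
 *	data       = XEN_PIRQ_MSI_DATA
 *
 * xen_hvm_setup_msi_irqs() below reverses this to recover the pirq from a
 * previously written message.
 */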
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, pirq;
	struct msi_desc *msidesc;
	struct msi_msg msg;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	list_for_each_entry(msidesc, &dev->msi_list, list) {
		__read_msi_msg(msidesc, &msg);
		pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
			((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
		if (msg.data != XEN_PIRQ_MSI_DATA ||
		    xen_irq_from_pirq(pirq) < 0) {
			pirq = xen_allocate_pirq_msi(dev, msidesc);
			if (pirq < 0) {
				irq = -ENODEV;
				goto error;
			}
			xen_msi_compose_msg(dev, pirq, &msg);
			__write_msi_msg(msidesc, &msg);
			dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
		} else {
			dev_dbg(&dev->dev,
				"xen: msi already bound to pirq=%d\n", pirq);
		}
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "msi-x" : "msi",
					       DOMID_SELF);
		if (irq < 0)
			goto error;
		dev_dbg(&dev->dev,
			"xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
	}
	return 0;

error:
	dev_err(&dev->dev,
		"Xen PCI frontend has not registered MSI/MSI-X support!\n");
	return irq;
}
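/*
 * The __read_msi_msg() check above lets a message that already carries
 * XEN_PIRQ_MSI_DATA keep its existing PIRQ, presumably so that a device set
 * up earlier (for example across a driver reload or save/restore) is not
 * given a second PIRQ; only unknown or foreign messages get a fresh PIRQ
 * from xen_allocate_pirq_msi().
 */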
#ifdef CONFIG_XEN_DOM0
static bool __read_mostly pci_seg_supported = true;

static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int ret = 0;
	struct msi_desc *msidesc;

	list_for_each_entry(msidesc, &dev->msi_list, list) {
		struct physdev_map_pirq map_irq;
		domid_t domid;

		domid = ret = xen_find_device_domain_owner(dev);
		/* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
		 * hence check ret value for < 0. */
		if (ret < 0)
			domid = DOMID_SELF;

		memset(&map_irq, 0, sizeof(map_irq));
		map_irq.domid = domid;
		map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
		map_irq.index = -1;
		map_irq.pirq = -1;
		map_irq.bus = dev->bus->number |
			      (pci_domain_nr(dev->bus) << 16);
		map_irq.devfn = dev->devfn;

		if (type == PCI_CAP_ID_MSI && nvec > 1) {
			map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI;
			map_irq.entry_nr = nvec;
		} else if (type == PCI_CAP_ID_MSIX) {
			int pos;
			u32 table_offset, bir;

			pos = dev->msix_cap;
			pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
					      &table_offset);
			bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);

			map_irq.table_base = pci_resource_start(dev, bir);
			map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
		}

		ret = -EINVAL;
		if (pci_seg_supported)
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
		if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
			/*
			 * If MAP_PIRQ_TYPE_MULTI_MSI is not available
			 * there's nothing else we can do in this case.
			 * Just set ret > 0 so driver can retry with
			 * single MSI.
			 */
			ret = 1;
			goto out;
		}
		if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
			map_irq.type = MAP_PIRQ_TYPE_MSI;
			map_irq.index = -1;
			map_irq.pirq = -1;
			map_irq.bus = dev->bus->number;
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
			if (ret != -EINVAL)
				pci_seg_supported = false;
		}
		if (ret) {
			dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
				 ret, domid);
			goto out;
		}

		ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi",
					       domid);
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	return ret;
}
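/*
 * In the initial domain the mapping uses MAP_PIRQ_TYPE_MSI_SEG so the PCI
 * segment (domain) number can be passed to Xen; if the hypervisor rejects
 * that with -EINVAL the code retries once with the older MAP_PIRQ_TYPE_MSI
 * (segment 0 only) and records the outcome in pci_seg_supported.
 */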
static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
{
	int ret = 0;

	if (pci_seg_supported) {
		struct physdev_pci_device restore_ext;

		restore_ext.seg = pci_domain_nr(dev->bus);
		restore_ext.bus = dev->bus->number;
		restore_ext.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
					&restore_ext);
		if (ret == -ENOSYS)
			pci_seg_supported = false;
		WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
	}
	if (!pci_seg_supported) {
		struct physdev_restore_msi restore;

		restore.bus = dev->bus->number;
		restore.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
		WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
	}
}
#endif
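/*
 * Restore follows the same pattern as setup: prefer the segment-aware
 * PHYSDEVOP_restore_msi_ext and fall back to PHYSDEVOP_restore_msi when the
 * hypervisor reports -ENOSYS.
 */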
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *msidesc;

	msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (msidesc->msi_attrib.is_msix)
		xen_pci_frontend_disable_msix(dev);
	else
		xen_pci_frontend_disable_msi(dev);

	/* Free the IRQs and the msidesc using the generic code. */
	default_teardown_msi_irqs(dev);
}

static void xen_teardown_msi_irq(unsigned int irq)
{
	xen_destroy_irq(irq);
}

static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	return 0;
}

static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	return 0;
}
#endif
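/*
 * The mask operations above are deliberately no-ops: under Xen the
 * hypervisor owns the MSI/MSI-X mask bits, so letting the kernel write them
 * would be redundant and, for PV guests, presumably would not reach the
 * hardware anyway.
 */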
int __init pci_xen_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

	pcibios_set_cache_line_size();

	pcibios_enable_irq = xen_pcifront_enable_irq;
	pcibios_disable_irq = NULL;

#ifdef CONFIG_ACPI
	/* Keep ACPI out of the picture */
	acpi_noirq = 1;
#endif

#ifdef CONFIG_PCI_MSI
	x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
	x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
	x86_msi.msi_mask_irq = xen_nop_msi_mask_irq;
	x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
#endif
	return 0;
}
int __init pci_xen_hvm_init(void)
{
	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
		return 0;

#ifdef CONFIG_ACPI
	/*
	 * We don't want to change the actual ACPI delivery model,
	 * just how GSIs get registered.
	 */
	__acpi_register_gsi = acpi_register_gsi_xen_hvm;
#endif

#ifdef CONFIG_PCI_MSI
	x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
#endif
	return 0;
}
#ifdef CONFIG_XEN_DOM0
static __init void xen_setup_acpi_sci(void)
{
	int rc;
	int trigger, polarity;
	int gsi = acpi_sci_override_gsi;
	int irq = -1;
	int gsi_override = -1;

	if (!gsi)
		return;

	rc = acpi_get_override_irq(gsi, &trigger, &polarity);
	if (rc) {
		printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
				" sci, rc=%d\n", rc);
		return;
	}
	trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
	polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;

	printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
			"polarity=%d\n", gsi, trigger, polarity);

	/* Before we bind the GSI to a Linux IRQ, check whether
	 * we need to override it with the bus_irq (IRQ) value. Usually for
	 * the legacy IRQs this holds IRQ == GSI, like so:
	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
	 * but there are oddballs where the IRQ != GSI:
	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
	 * which ends up as gsi_to_irq[9] == 20. That is what acpi_gsi_to_irq
	 * reports when the ACPI interpreter starts up, and it would keel
	 * over since IRQ 9 has not been set up (we set up IRQ 20 for it
	 * instead).
	 */
	if (acpi_gsi_to_irq(gsi, &irq) == 0) {
		/* Use the provided value if it's valid. */
		if (irq >= 0)
			gsi_override = irq;
	}

	gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
	printk(KERN_INFO "xen: acpi sci %d\n", gsi);
}
int __init pci_xen_initial_domain(void)
{
	int irq;

#ifdef CONFIG_PCI_MSI
	x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
	x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
	x86_msi.msi_mask_irq = xen_nop_msi_mask_irq;
	x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
#endif
	xen_setup_acpi_sci();
	__acpi_register_gsi = acpi_register_gsi_xen;
	/* Pre-allocate legacy irqs */
	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
		int trigger, polarity;

		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
			continue;

		xen_register_pirq(irq, -1 /* no GSI override */,
			trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
			true /* Map GSI to PIRQ */);
	}
	if (0 == nr_ioapics) {
		for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
			xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
	}
	return 0;
}
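/*
 * In the no-IOAPIC case above there are no ACPI interrupt overrides for the
 * pre-allocation loop to consult, so the 8259-style fallback simply binds
 * PIRQ n to IRQ n ("xt-pic") for each of the legacy lines.
 */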
struct xen_device_domain_owner {
	domid_t domain;
	struct pci_dev *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);

static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}
	return NULL;
}

int xen_find_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;
	int domain = -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (owner)
		domain = owner->domain;
	spin_unlock(&dev_domain_list_spinlock);
	return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);

int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
	if (!owner)
		return -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);

int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (!owner) {
		spin_unlock(&dev_domain_list_spinlock);
		return -ENODEV;
	}
	list_del(&owner->list);
	spin_unlock(&dev_domain_list_spinlock);
	kfree(owner);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);