Commit | Line | Data |
---|---|---|
b5401a96 AN |
1 | /* |
2 | * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux | |
3 | * x86 PCI core to support the Xen PCI Frontend | |
4 | * | |
5 | * Author: Ryan Wilson <hap9@epoch.ncsc.mil> | |
6 | */ | |
7 | #include <linux/module.h> | |
8 | #include <linux/init.h> | |
9 | #include <linux/pci.h> | |
10 | #include <linux/acpi.h> | |
11 | ||
12 | #include <linux/io.h> | |
13 | #include <asm/pci_x86.h> | |
14 | ||
15 | #include <asm/xen/hypervisor.h> | |
16 | ||
3942b740 | 17 | #include <xen/features.h> |
b5401a96 AN |
18 | #include <xen/events.h> |
19 | #include <asm/xen/pci.h> | |
20 | ||
42a1de56 SS |
21 | #ifdef CONFIG_ACPI |
22 | static int xen_hvm_register_pirq(u32 gsi, int triggering) | |
23 | { | |
24 | int rc, irq; | |
25 | struct physdev_map_pirq map_irq; | |
26 | int shareable = 0; | |
27 | char *name; | |
28 | ||
29 | if (!xen_hvm_domain()) | |
30 | return -1; | |
31 | ||
32 | map_irq.domid = DOMID_SELF; | |
33 | map_irq.type = MAP_PIRQ_TYPE_GSI; | |
34 | map_irq.index = gsi; | |
35 | map_irq.pirq = -1; | |
36 | ||
37 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | |
38 | if (rc) { | |
39 | printk(KERN_WARNING "xen map irq failed %d\n", rc); | |
40 | return -1; | |
41 | } | |
42 | ||
43 | if (triggering == ACPI_EDGE_SENSITIVE) { | |
44 | shareable = 0; | |
45 | name = "ioapic-edge"; | |
46 | } else { | |
47 | shareable = 1; | |
48 | name = "ioapic-level"; | |
49 | } | |
50 | ||
51 | irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name); | |
52 | ||
53 | printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); | |
54 | ||
55 | return irq; | |
56 | } | |
90f6881e JF |
57 | |
58 | static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, | |
59 | int trigger, int polarity) | |
60 | { | |
61 | return xen_hvm_register_pirq(gsi, trigger); | |
62 | } | |
42a1de56 SS |
63 | #endif |
64 | ||
b5401a96 AN |
65 | #if defined(CONFIG_PCI_MSI) |
66 | #include <linux/msi.h> | |
67 | ||
68 | struct xen_pci_frontend_ops *xen_pci_frontend; | |
69 | EXPORT_SYMBOL_GPL(xen_pci_frontend); | |
70 | ||
71 | /* | |
72 | * For MSI interrupts we have to use drivers/xen/event.s functions to | |
73 | * allocate an irq_desc and setup the right */ | |
74 | ||
75 | ||
76 | static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |
77 | { | |
78 | int irq, ret, i; | |
79 | struct msi_desc *msidesc; | |
80 | int *v; | |
81 | ||
82 | v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); | |
83 | if (!v) | |
84 | return -ENOMEM; | |
85 | ||
86 | if (!xen_initial_domain()) { | |
87 | if (type == PCI_CAP_ID_MSIX) | |
88 | ret = xen_pci_frontend_enable_msix(dev, &v, nvec); | |
89 | else | |
90 | ret = xen_pci_frontend_enable_msi(dev, &v); | |
91 | if (ret) | |
92 | goto error; | |
93 | } | |
94 | i = 0; | |
95 | list_for_each_entry(msidesc, &dev->msi_list, list) { | |
96 | irq = xen_allocate_pirq(v[i], 0, /* not sharable */ | |
97 | (type == PCI_CAP_ID_MSIX) ? | |
98 | "pcifront-msi-x" : "pcifront-msi"); | |
99 | if (irq < 0) | |
100 | return -1; | |
101 | ||
102 | ret = set_irq_msi(irq, msidesc); | |
103 | if (ret) | |
104 | goto error_while; | |
105 | i++; | |
106 | } | |
107 | kfree(v); | |
108 | return 0; | |
109 | ||
110 | error_while: | |
111 | unbind_from_irqhandler(irq, NULL); | |
112 | error: | |
113 | if (ret == -ENODEV) | |
114 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ | |
115 | " MSI/MSI-X support!\n"); | |
116 | ||
117 | kfree(v); | |
118 | return ret; | |
119 | } | |
120 | ||
121 | static void xen_teardown_msi_irqs(struct pci_dev *dev) | |
122 | { | |
123 | /* Only do this when were are in non-privileged mode.*/ | |
124 | if (!xen_initial_domain()) { | |
125 | struct msi_desc *msidesc; | |
126 | ||
127 | msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); | |
128 | if (msidesc->msi_attrib.is_msix) | |
129 | xen_pci_frontend_disable_msix(dev); | |
130 | else | |
131 | xen_pci_frontend_disable_msi(dev); | |
132 | } | |
133 | ||
134 | } | |
135 | ||
/* Release a single MSI irq: unmap the pirq and free the Linux irq. */
static void xen_teardown_msi_irq(unsigned int irq)
{
	xen_destroy_irq(irq);
}
140 | #endif | |
141 | ||
142 | static int xen_pcifront_enable_irq(struct pci_dev *dev) | |
143 | { | |
144 | int rc; | |
145 | int share = 1; | |
146 | ||
147 | dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq); | |
148 | ||
149 | if (dev->irq < 0) | |
150 | return -EINVAL; | |
151 | ||
152 | if (dev->irq < NR_IRQS_LEGACY) | |
153 | share = 0; | |
154 | ||
155 | rc = xen_allocate_pirq(dev->irq, share, "pcifront"); | |
156 | if (rc < 0) { | |
157 | dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n", | |
158 | dev->irq, rc); | |
159 | return rc; | |
160 | } | |
161 | return 0; | |
162 | } | |
163 | ||
/*
 * Install the Xen PCI frontend stub hooks into the x86 PCI core.
 * Runs only in a non-privileged PV domain; the initial domain and
 * non-PV guests get -ENODEV so the normal PCI paths stay in place.
 */
int __init pci_xen_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

	pcibios_set_cache_line_size();

	/* Route irq enable through the pirq stub; nothing to do on disable. */
	pcibios_enable_irq = xen_pcifront_enable_irq;
	pcibios_disable_irq = NULL;

#ifdef CONFIG_ACPI
	/* Keep ACPI out of the picture */
	acpi_noirq = 1;
#endif

#ifdef CONFIG_PCI_MSI
	/* Hand MSI setup/teardown to the pcifront implementations above. */
	x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
	x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
#endif
	return 0;
}
3942b740 SS |
188 | |
/*
 * HVM-domain setup: if Xen exposes pirq support for HVM guests, hook
 * ACPI GSI registration so GSIs are mapped through pirqs.  Always
 * returns 0 (silently a no-op without XENFEAT_hvm_pirqs).
 */
int __init pci_xen_hvm_init(void)
{
	if (!xen_feature(XENFEAT_hvm_pirqs))
		return 0;

#ifdef CONFIG_ACPI
	/*
	 * We don't want to change the actual ACPI delivery model,
	 * just how GSIs get registered.
	 */
	__acpi_register_gsi = acpi_register_gsi_xen_hvm;
#endif
	return 0;
}