#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

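/*
 * Handle the "intremap=" early parameter: "on" / "off" enable or disable
 * interrupt remapping, "nosid" disables source-id checking.
 */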
static __init int setup_intremap(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strncmp(str, "on", 2))
                disable_intremap = 0;
        else if (!strncmp(str, "off", 3))
                disable_intremap = 1;
        else if (!strncmp(str, "nosid", 5))
                disable_sourceid_checking = 1;

        return 0;
}
early_param("intremap", setup_intremap);

static DEFINE_SPINLOCK(irq_2_ir_lock);

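/*
 * Per-irq remapping state lives in the irq's chip data (struct irq_cfg);
 * return it, or NULL if the irq has no chip data.
 */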
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_cfg *cfg = get_irq_chip_data(irq);
        return cfg ? &cfg->irq_2_iommu : NULL;
}

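/*
 * Copy the IRTE currently programmed for @irq into @entry.
 * Returns -1 if the irq has no remapping state.
 */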
int get_irte(int irq, struct irte *entry)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!entry || !irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

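/*
 * Allocate @count consecutive IRTEs (rounded up to a power of two) in the
 * IOMMU's interrupt remapping table and record the base index, sub_handle
 * and mask in the irq's irq_2_iommu state.  Returns the base index, or -1
 * if no free range is found.
 */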
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count || !irq_iommu)
                return -1;

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

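/*
 * Queue an interrupt entry cache (IEC) invalidation for the given index
 * range and wait for it to complete.
 */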
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

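/*
 * Rewrite the IRTE backing @irq with @irte_modified and invalidate the
 * interrupt entry cache for that entry.
 */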
int modify_irte(int irq, struct irte *irte_modified)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct intel_iommu *iommu;
        unsigned long flags;
        struct irte *irte;
        int rc, index;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

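/*
 * Map an interrupt source (HPET block, IO-APIC or PCI device) to the
 * IOMMU that remaps its interrupts.
 */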
struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

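/*
 * Zero every IRTE in the allocation that backs @irq_iommu and flush the
 * interrupt entry cache.  Sub-handle users share the owner's entries, so
 * only the sub_handle 0 owner actually clears anything.
 */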
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int rc;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY           0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ       0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS          0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

/*
 * Set the SVT, SQ and SID fields of the IRTE so that the hardware
 * verifies the source-id of interrupt requests.
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        if (disable_sourceid_checking)
                svt = SVT_NO_VERIFY;
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

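/*
 * Program the source-id used to validate an MSI: PCIe and Root Complex
 * integrated devices are verified against their own requester-id, devices
 * behind a PCIe-to-PCI/PCI-X bridge are verified by bus number only, and
 * devices behind a legacy PCI bridge use the bridge's requester-id.
 */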
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (pci_is_pcie(bridge)) /* PCIe-to-PCI/PCI-X bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}

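/*
 * Point the hardware at the interrupt remapping table (DMAR_IRTA_REG),
 * invalidate the interrupt entry cache, and then set DMA_GCMD_IRE to
 * enable remapping.
 */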
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

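/*
 * Allocate the ir_table descriptor and the remapping table pages for this
 * IOMMU, then hand the table to the hardware.
 */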
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

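/*
 * Interrupt remapping is usable only if it has not been disabled on the
 * command line, the DMAR tables advertise it, and every DRHD supports it.
 */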
int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        if (!dmar_ir_support())
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

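/*
 * Bring up interrupt remapping on every DRHD: tear down any remapping and
 * queued invalidation state left over from before OS handover, verify EIM
 * support when extended (x2apic) mode is requested, enable queued
 * invalidation, and finally allocate and program the remapping tables.
 */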
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                return -1;
        }

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized,
                 * shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * Check for interrupt-remapping support.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                    struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }
        ir_hpet[ir_hpet_num].bus = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id = scope->enumeration_id;
        ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
        ir_ioapic_num++;
}

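/*
 * Walk the device scopes of one DRHD and record every IO-APIC and HPET
 * block that falls under it.
 */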
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base "
                               "0x%Lx IOMMU %d\n", scope->enumeration_id,
                               drhd->address, iommu->seq_id);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base "
                               "0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Find the association between IO-APICs and the interrupt-remapping
 * hardware units that cover them.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

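/*
 * Re-enable queued invalidation where it was already set up and reprogram
 * interrupt remapping on every capable IOMMU, reusing the existing tables.
 */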
int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for this iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}