Commit | Line | Data |
---|---|---|
5aeecaf4 | 1 | #include <linux/interrupt.h> |
ad3ad3f6 | 2 | #include <linux/dmar.h> |
2ae21010 | 3 | #include <linux/spinlock.h> |
5a0e3ad6 | 4 | #include <linux/slab.h> |
2ae21010 | 5 | #include <linux/jiffies.h> |
20f3097b | 6 | #include <linux/hpet.h> |
2ae21010 | 7 | #include <linux/pci.h> |
b6fcb33a | 8 | #include <linux/irq.h> |
ad3ad3f6 | 9 | #include <asm/io_apic.h> |
17483a1f | 10 | #include <asm/smp.h> |
6d652ea1 | 11 | #include <asm/cpu.h> |
38717946 | 12 | #include <linux/intel-iommu.h> |
ad3ad3f6 | 13 | #include "intr_remapping.h" |
46f06b72 | 14 | #include <acpi/acpi.h> |
f007e99c WH |
15 | #include <asm/pci-direct.h> |
16 | #include "pci.h" | |
ad3ad3f6 SS |
17 | |
18 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | |
20f3097b SS |
19 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; |
20 | static int ir_ioapic_num, ir_hpet_num; | |
2ae21010 SS |
21 | int intr_remapping_enabled; |
22 | ||
03ea8155 | 23 | static int disable_intremap; |
d1423d56 CW |
24 | static int disable_sourceid_checking; |
25 | ||
03ea8155 WH |
/*
 * Kernel boot parameter "nointremap": force interrupt remapping off.
 * Runs via early_param, i.e. before the IOMMU code reads disable_intremap.
 */
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);
32 | ||
d1423d56 CW |
/*
 * Kernel boot parameter "intremap=on|off|nosid":
 *   on    - allow interrupt remapping
 *   off   - disable interrupt remapping
 *   nosid - keep remapping but skip source-id verification in the IRTEs
 * Matching is by prefix (strncmp), so e.g. "offx" also matches "off".
 */
static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		disable_intremap = 0;
	else if (!strncmp(str, "off", 3))
		disable_intremap = 1;
	else if (!strncmp(str, "nosid", 5))
		disable_sourceid_checking = 1;

	return 0;
}
early_param("intremap", setup_intremap);
48 | ||
#ifdef CONFIG_GENERIC_HARDIRQS
/* Look up the irq_2_iommu mapping stored in the irq descriptor. */
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	return get_irq_iommu(irq);
}

/*
 * Allocate (or return the already-present) irq_2_iommu for @irq.
 * GFP_ATOMIC because callers hold irq_2_ir_lock with irqs disabled.
 */
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);

	if (WARN_ONCE(data->irq_2_iommu,
		      KERN_DEBUG "irq_2_iommu!=NULL irq %u\n", irq))
		return data->irq_2_iommu;

	data->irq_2_iommu = kzalloc_node(sizeof(*data->irq_2_iommu),
					 GFP_ATOMIC, data->node);
	return data->irq_2_iommu;
}

/* Detach and free the irq_2_iommu mapping of @irq. */
static void irq_2_iommu_free(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct irq_2_iommu *p = d->irq_2_iommu;

	d->irq_2_iommu = NULL;
	kfree(p);
}

#else /* !CONFIG_GENERIC_HARDIRQS */

/* Static-table fallback: one preallocated slot per possible irq. */
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}

/* Nothing to free in the static-table case. */
static void irq_2_iommu_free(unsigned int irq) { }

#endif
b6fcb33a SS |
96 | |
97 | static DEFINE_SPINLOCK(irq_2_ir_lock); | |
98 | ||
e420dfb4 | 99 | static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) |
b6fcb33a | 100 | { |
e420dfb4 YL |
101 | struct irq_2_iommu *irq_iommu; |
102 | ||
103 | irq_iommu = irq_2_iommu(irq); | |
b6fcb33a | 104 | |
e420dfb4 YL |
105 | if (!irq_iommu) |
106 | return NULL; | |
b6fcb33a | 107 | |
e420dfb4 YL |
108 | if (!irq_iommu->iommu) |
109 | return NULL; | |
b6fcb33a | 110 | |
e420dfb4 YL |
111 | return irq_iommu; |
112 | } | |
b6fcb33a | 113 | |
/* Nonzero when @irq is currently routed through interrupt remapping. */
int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) ? 1 : 0;
}
118 | ||
/*
 * Copy the IRTE (interrupt remap table entry) backing @irq into *entry.
 * Returns 0 on success, -1 when @entry is NULL or @irq has no valid
 * remapping state.
 */
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	/* sub_handle selects this irq's entry inside a multi-entry block */
	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
141 | ||
/*
 * Allocate @count consecutive free IRTEs in @iommu's remap table and
 * bind @irq to the first one.  For count > 1 the request is rounded up
 * to a power of two and the search steps in that granularity, so the
 * whole block can be covered by one invalidation "mask" value.
 * Returns the first table index, or -1 on failure.
 */
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	/* the hardware caps how many entries one invalidation may cover */
	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		/* stepping by the rounded count keeps blocks aligned */
		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			/* wrapped all the way around: table is full */
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		/*
		 * NOTE(review): the IRTEs marked present above are not
		 * released on this failure path -- they leak.
		 */
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
215 | ||
704126ad | 216 | static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) |
b6fcb33a SS |
217 | { |
218 | struct qi_desc desc; | |
219 | ||
220 | desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask) | |
221 | | QI_IEC_SELECTIVE; | |
222 | desc.high = 0; | |
223 | ||
704126ad | 224 | return qi_submit_sync(&desc, iommu); |
b6fcb33a SS |
225 | } |
226 | ||
/*
 * Look up the IRTE allocation recorded for @irq: *sub_handle receives
 * the irq's slot within its block, and the block's base index is
 * returned, or -1 if @irq has no valid remapping state.
 */
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
245 | ||
/*
 * Bind @irq to an already-allocated IRTE block on @iommu: record the
 * base table @index and this irq's @subhandle within that block.
 * Returns 0 on success, -1 if no irq_2_iommu can be allocated.
 */
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);

	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	/* irte_mask stays 0: this irq does not own the whole block */
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
270 | ||
b6fcb33a SS |
/*
 * Rewrite the IRTE backing @irq with *irte_modified and invalidate the
 * hardware's interrupt entry cache for that index.  Returns the
 * qi_flush_iec() result, or -1 if @irq is not remapped.
 */
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	/* atomic 64-bit stores: hardware may read the entry concurrently */
	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
301 | ||
20f3097b SS |
302 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) |
303 | { | |
304 | int i; | |
305 | ||
306 | for (i = 0; i < MAX_HPET_TBS; i++) | |
307 | if (ir_hpet[i].id == hpet_id) | |
308 | return ir_hpet[i].iommu; | |
309 | return NULL; | |
310 | } | |
311 | ||
89027d35 SS |
312 | struct intel_iommu *map_ioapic_to_ir(int apic) |
313 | { | |
314 | int i; | |
315 | ||
316 | for (i = 0; i < MAX_IO_APICS; i++) | |
317 | if (ir_ioapic[i].id == apic) | |
318 | return ir_ioapic[i].iommu; | |
319 | return NULL; | |
320 | } | |
321 | ||
75c46fa6 SS |
322 | struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) |
323 | { | |
324 | struct dmar_drhd_unit *drhd; | |
325 | ||
326 | drhd = dmar_find_matched_drhd_unit(dev); | |
327 | if (!drhd) | |
328 | return NULL; | |
329 | ||
330 | return drhd->iommu; | |
331 | } | |
332 | ||
c4658b4e WH |
333 | static int clear_entries(struct irq_2_iommu *irq_iommu) |
334 | { | |
335 | struct irte *start, *entry, *end; | |
336 | struct intel_iommu *iommu; | |
337 | int index; | |
338 | ||
339 | if (irq_iommu->sub_handle) | |
340 | return 0; | |
341 | ||
342 | iommu = irq_iommu->iommu; | |
343 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | |
344 | ||
345 | start = iommu->ir_table->base + index; | |
346 | end = start + (1 << irq_iommu->irte_mask); | |
347 | ||
348 | for (entry = start; entry < end; entry++) { | |
c513b67e LT |
349 | set_64bit(&entry->low, 0); |
350 | set_64bit(&entry->high, 0); | |
c4658b4e WH |
351 | } |
352 | ||
353 | return qi_flush_iec(iommu, index, irq_iommu->irte_mask); | |
354 | } | |
355 | ||
b6fcb33a SS |
/*
 * Undo alloc_irte()/set_irte_irq() for @irq: clear the IRTEs (done only
 * by the sub_handle == 0 owner, see clear_entries()), reset the
 * bookkeeping and free the irq_2_iommu mapping.  Returns the
 * clear_entries() result, or -1 if @irq was not remapped.
 */
int free_irte(int irq)
{
	int rc = 0;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	/* free outside the spinlock; the mapping is already detached */
	irq_2_iommu_free(irq);

	return rc;
}
382 | ||
f007e99c WH |
383 | /* |
384 | * source validation type | |
385 | */ | |
386 | #define SVT_NO_VERIFY 0x0 /* no verification is required */ | |
387 | #define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */ |
388 | #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ | |
389 | ||
390 | /* | |
391 | * source-id qualifier | |
392 | */ | |
393 | #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ | |
394 | #define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore | |
395 | * the third least significant bit | |
396 | */ | |
397 | #define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore | |
398 | * the second and third least significant bits | |
399 | */ | |
400 | #define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore | |
401 | * the least three significant bits | |
402 | */ | |
403 | ||
404 | /* | |
405 | * set SVT, SQ and SID fields of irte to verify | |
406 | * source ids of interrupt requests | |
407 | */ | |
408 | static void set_irte_sid(struct irte *irte, unsigned int svt, | |
409 | unsigned int sq, unsigned int sid) | |
410 | { | |
d1423d56 CW |
411 | if (disable_sourceid_checking) |
412 | svt = SVT_NO_VERIFY; | |
f007e99c WH |
413 | irte->svt = svt; |
414 | irte->sq = sq; | |
415 | irte->sid = sid; | |
416 | } | |
417 | ||
418 | int set_ioapic_sid(struct irte *irte, int apic) | |
419 | { | |
420 | int i; | |
421 | u16 sid = 0; | |
422 | ||
423 | if (!irte) | |
424 | return -1; | |
425 | ||
426 | for (i = 0; i < MAX_IO_APICS; i++) { | |
427 | if (ir_ioapic[i].id == apic) { | |
428 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; | |
429 | break; | |
430 | } | |
431 | } | |
432 | ||
433 | if (sid == 0) { | |
434 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); | |
435 | return -1; | |
436 | } | |
437 | ||
438 | set_irte_sid(irte, 1, 0, sid); | |
439 | ||
440 | return 0; | |
441 | } | |
442 | ||
20f3097b SS |
443 | int set_hpet_sid(struct irte *irte, u8 id) |
444 | { | |
445 | int i; | |
446 | u16 sid = 0; | |
447 | ||
448 | if (!irte) | |
449 | return -1; | |
450 | ||
451 | for (i = 0; i < MAX_HPET_TBS; i++) { | |
452 | if (ir_hpet[i].id == id) { | |
453 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; | |
454 | break; | |
455 | } | |
456 | } | |
457 | ||
458 | if (sid == 0) { | |
459 | pr_warning("Failed to set source-id of HPET block (%d)\n", id); | |
460 | return -1; | |
461 | } | |
462 | ||
463 | /* | |
464 | * Should really use SQ_ALL_16. Some platforms are broken. | |
465 | * While we figure out the right quirks for these broken platforms, use | |
466 | * SQ_13_IGNORE_3 for now. | |
467 | */ | |
468 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid); | |
469 | ||
470 | return 0; | |
471 | } | |
472 | ||
f007e99c WH |
/*
 * Program IRTE source-id verification for MSI from @dev:
 *  - PCIe or root-complex-integrated PCI devices: verify the full
 *    16-bit requester id (bus << 8 | devfn);
 *  - behind a PCIe-to-PCI/PCI-X bridge: the bridge may rewrite the
 *    requester id, so only the bus-number range is verified;
 *  - behind a legacy PCI bridge: verify the bridge's own requester id.
 * If no upstream PCIe bridge is found the IRTE is left untouched but
 * 0 is still returned.
 */
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
499 | ||
2ae21010 SS |
/*
 * Point @iommu at its interrupt remap table and turn remapping on:
 *  1. write the table's physical address, x2apic-mode bit and size to
 *     DMAR_IRTA_REG, latch with the GCMD SIRTP bit, wait for IRTPS;
 *  2. globally invalidate the interrupt entry cache so nothing stale
 *     survives the table switch;
 *  3. set GCMD IRE and wait for IRES to confirm remapping is live.
 * Register accesses are serialized by iommu->register_lock.
 */
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
538 | ||
539 | ||
540 | static int setup_intr_remapping(struct intel_iommu *iommu, int mode) | |
541 | { | |
542 | struct ir_table *ir_table; | |
543 | struct page *pages; | |
544 | ||
545 | ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), | |
fa4b57cc | 546 | GFP_ATOMIC); |
2ae21010 SS |
547 | |
548 | if (!iommu->ir_table) | |
549 | return -ENOMEM; | |
550 | ||
824cd75b SS |
551 | pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, |
552 | INTR_REMAP_PAGE_ORDER); | |
2ae21010 SS |
553 | |
554 | if (!pages) { | |
555 | printk(KERN_ERR "failed to allocate pages of order %d\n", | |
556 | INTR_REMAP_PAGE_ORDER); | |
557 | kfree(iommu->ir_table); | |
558 | return -ENOMEM; | |
559 | } | |
560 | ||
561 | ir_table->base = page_address(pages); | |
562 | ||
563 | iommu_set_intr_remapping(iommu, mode); | |
564 | return 0; | |
565 | } | |
566 | ||
eba67e5d SS |
/*
 * Disable Interrupt Remapping on @iommu: invalidate the interrupt
 * entry cache first, then clear GCMD IRE and wait for the IRES status
 * bit to drop.  No-op if the unit lacks IR support or (per
 * DMAR_GSTS_REG) remapping is already off.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
599 | ||
93758238 WH |
600 | int __init intr_remapping_supported(void) |
601 | { | |
602 | struct dmar_drhd_unit *drhd; | |
603 | ||
03ea8155 WH |
604 | if (disable_intremap) |
605 | return 0; | |
606 | ||
074835f0 YS |
607 | if (!dmar_ir_support()) |
608 | return 0; | |
609 | ||
93758238 WH |
610 | for_each_drhd_unit(drhd) { |
611 | struct intel_iommu *iommu = drhd->iommu; | |
612 | ||
613 | if (!ecap_ir_support(iommu->ecap)) | |
614 | return 0; | |
615 | } | |
616 | ||
617 | return 1; | |
618 | } | |
619 | ||
2ae21010 SS |
/*
 * Enable interrupt remapping on every DRHD unit.
 * @eim: nonzero selects x2apic (extended interrupt) mode; every unit
 *       must then report EIM support or the whole operation is refused.
 * Phases: validate the IO-APIC scopes, quiesce pre-OS remap/QI state,
 * check IR/EIM capability, enable queued invalidation (needed for IEC
 * flushes), then program and enable the remap tables.
 * Returns 0 on success, -1 on failure.
 */
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enable interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
ad3ad3f6 | 714 | |
20f3097b SS |
/*
 * Record, in ir_hpet[], the bus/devfn source-id under which the HPET
 * block described by @scope raises interrupts, walking the ACPI device
 * scope path with direct config-space reads (the PCI core is not up
 * yet at parse time).
 */
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	/* follow all but the last path element down the bridge chain */
	while (--count > 0) {
		/*
		 * Access PCI directly due to the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	/*
	 * NOTE(review): assumes at least one path entry (count >= 1);
	 * with count == 0 the path-> reads below go past the scope.
	 */
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}
742 | ||
f007e99c WH |
/*
 * Record, in ir_ioapic[], the bus/devfn source-id under which the
 * IO-APIC described by @scope raises interrupts, walking the ACPI
 * device scope path with direct config-space reads (the PCI core is
 * not up yet at parse time).
 */
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	/* follow all but the last path element down the bridge chain */
	while (--count > 0) {
		/*
		 * Access PCI directly due to the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	/*
	 * NOTE(review): assumes at least one path entry (count >= 1);
	 * with count == 0 the path-> reads below go past the scope.
	 */
	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}
771 | ||
20f3097b SS |
/*
 * Walk the device scopes of one DRHD and register every IO-APIC and
 * HPET entry as being remapped by @iommu.  Returns 0 on success, -1
 * when the static ir_ioapic[]/ir_hpet[] tables would overflow.
 */
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	/* scope entries are variable length; advance by scope->length */
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}
814 | ||
815 | /* | |
816 | * Finds the association between IO-APICs and their Interrupt-remapping |
817 | * hardware unit. | |
818 | */ | |
/*
 * Parse the device scopes of every IR-capable DRHD so IO-APICs and
 * HPET blocks are mapped to their remapping unit.  Returns 1 when
 * remapping is supported and every IO-APIC is covered, 0 when no unit
 * supports remapping, -1 on parse error or incomplete coverage.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	/* remapping is all-or-nothing: every IO-APIC must be listed */
	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
b24696bc FY |
843 | |
/*
 * Turn interrupt remapping off on every IR-capable unit.  The per-unit
 * remap tables are left allocated so reenable_intr_remapping() can
 * restore them later.
 */
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}
859 | ||
/*
 * Re-enable interrupt remapping after disable_intr_remapping():
 * restart queued invalidation where it was initialized, then reprogram
 * and re-enable the still-allocated remap tables.  @eim selects x2apic
 * mode as in enable_intr_remapping().  Returns 0 on success, -1 when
 * no IR-capable unit was found.
 */
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
893 |