/* drivers/iommu/intr_remapping.c */
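/*
 * Support for Intel VT-d interrupt remapping: allocation and
 * maintenance of Interrupt Remapping Table Entries (IRTEs),
 * source-id validation for remapped interrupts, and discovery of
 * which IOAPICs and HPET blocks sit under which remapping hardware
 * unit (DRHD).
 */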
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		disable_intremap = 0;
	else if (!strncmp(str, "off", 3))
		disable_intremap = 1;
	else if (!strncmp(str, "nosid", 5))
		disable_sourceid_checking = 1;

	return 0;
}
early_param("intremap", setup_intremap);

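/*
 * A single lock serializes all updates to the per-irq remapping state
 * below; it is taken irqsave since callers may run with interrupts
 * disabled.  irq_2_iommu() returns the remapping bookkeeping embedded
 * in the irq's chip data (struct irq_cfg), or NULL if there is none.
 */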
static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}

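/*
 * Read back the IRTE currently programmed for @irq into @entry.
 * Returns 0 on success, -1 if the irq has no remapping state.
 */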
int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

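/*
 * Allocate @count contiguous IRTEs for @irq in @iommu's remapping
 * table and return the index of the first one.  Multi-IRTE requests
 * are rounded up to a power of two because the hardware encodes the
 * allocation size as a mask: e.g. a request for 3 entries becomes 4
 * entries with mask = ilog2(4) = 2.
 */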
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

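/*
 * Queue a selective Interrupt Entry Cache invalidation covering the
 * 2^@mask IRTEs starting at @index, and wait for it to complete.
 */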
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

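/*
 * Associate @irq with IRTE @index + @subhandle in @iommu's table.
 * Used when several vectors share one block of IRTEs: only the irq
 * that called alloc_irte() owns the allocation (irte_mask != 0), the
 * others reference into it through a subhandle.
 */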
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

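/*
 * Overwrite the IRTE for @irq with @irte_modified and invalidate the
 * hardware's cached copy.  The two set_64bit() stores keep each half
 * of the 128-bit entry atomic with respect to concurrent hardware
 * reads.
 */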
int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

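/*
 * Typical IRTE lifecycle as driven by the x86 interrupt setup code
 * (illustrative sketch only; error handling omitted):
 *
 *	index = alloc_irte(iommu, irq, nvec);	// reserve 2^n entries
 *	... fill in a struct irte (vector, dest, sid, present) ...
 *	modify_irte(irq, &irte);		// program entry + flush IEC
 *	...
 *	free_irte(irq);				// clear entries + flush IEC
 */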
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16.  Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

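/*
 * Derive the source-id used to validate MSIs from @dev.  A PCIe or
 * Root Complex integrated device signals with its own bus/devfn.  A
 * device behind a PCIe-to-PCI(-X) bridge is seen upstream with a bus
 * number owned by the bridge, so only a bus range is verified.
 * Behind a legacy PCI bridge, requests carry the bridge's own
 * bus/devfn.
 */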
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}

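/*
 * Program the remapping table into the hardware and enable remapping:
 * write the table address to IRTA and set SIRTP so the unit latches
 * the new pointer, globally invalidate the interrupt entry cache,
 * then set IRE.  Each step polls the global status register until the
 * hardware acknowledges it.
 */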
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    addr | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

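/*
 * Allocate the interrupt remapping table for @iommu (zeroed pages of
 * order INTR_REMAP_PAGE_ORDER on the unit's home node) and hand it to
 * the hardware.
 */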
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* GSTS is a 32-bit register; use a 32-bit read */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

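/*
 * Interrupt remapping is usable only if it was not disabled on the
 * command line, the DMAR table advertises it, and every remapping
 * hardware unit supports it.
 */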
int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

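/*
 * Bring up interrupt remapping on every DRHD: tear down any remapping
 * and queued-invalidation state left over from the firmware, verify
 * EIM support if extended (x2apic) mode was requested, enable queued
 * invalidation everywhere, and only then install the remapping
 * tables.
 */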
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

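/*
 * Record which bus/devfn an HPET block sits behind.  The DMAR device
 * scope gives a path of PCI bridges from @scope->bus down to the
 * device; walk it by reading each bridge's secondary bus number
 * straight from config space, since the PCI subsystem is not up yet.
 */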
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly since the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly since the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

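/*
 * disable_intr_remapping()/reenable_intr_remapping() bracket suspend
 * and resume: remapping is turned off on every unit before sleep and
 * reprogrammed (after restarting queued invalidation) on wakeup.
 */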
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}