/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
 *    before the IO devices managed by that unit.
 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
 *    after the IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
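/*
 * Illustrative only -- a minimal sketch (not part of the original file) of
 * the locking rules above.  The example_* names are hypothetical.
 */
#if 0
static void example_reader(void)	/* interrupt/atomic context */
{
	struct dmar_drhd_unit *drhd;

	rcu_read_lock();
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) {
		/* inspect drhd; sleeping is not allowed here */
	}
	rcu_read_unlock();
}

static void example_writer(struct dmar_drhd_unit *drhd)	/* process context */
{
	down_write(&dmar_global_lock);
	list_add_rcu(&drhd->list, &dmar_drhd_units);
	up_write(&dmar_global_lock);
}
#endif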
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that scanning the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
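/*
 * Illustrative only: the memory layout walked by dmar_alloc_dev_scope()
 * above.  Device scope entries trail their DRHD entry, and each carries a
 * variable number of PCI path elements within scope->length:
 *
 *   struct acpi_dmar_hardware_unit      <- drhd
 *   struct acpi_dmar_device_scope #0    <- (void *)(drhd + 1)
 *     struct acpi_dmar_pci_path [...]
 *   struct acpi_dmar_device_scope #1
 *     struct acpi_dmar_pci_path [...]
 *   ...                                 <- end = (void *)drhd + drhd->header.length
 */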
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}
/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev, level--; tmp; tmp = tmp->bus->self) {
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
			level--;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		return false;
	if (info->level != count)
		return false;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			return false;
	}

	return true;
}
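/*
 * Illustrative only, with made-up numbers: a device reached as
 * root bus 0 -> bridge 00:1c.0 -> endpoint 01:00.0 is matched against a
 * scope whose scope->bus == 0 and path[] == { {0x1c, 0}, {0x00, 0} };
 * info->path[] holds the same device/function pairs from root to leaf.
 */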
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			rcu_assign_pointer(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}
static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
					  dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

	return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->object_name);

	return 0;
}
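/*
 * Illustrative only: why "header->length - 8" above.  In an ANDD entry
 * the object name starts at byte offset 8 (2-byte type + 2-byte length +
 * 3 reserved bytes + 1-byte device number), so at most header->length - 8
 * bytes remain for the name, including its terminating NUL.
 */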
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_ANDD:
		/*
		 * We don't print this here because we need to sanity-check
		 * it first.  So print it in dmar_parse_one_andd() instead.
		 */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ANDD:
			ret = dmar_parse_one_andd(entry_header);
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
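/*
 * Illustrative only -- a minimal, hypothetical caller showing the usual
 * lookup pattern: map a PCI device to the intel_iommu that translates it.
 * The INCLUDE_ALL unit, kept at the list tail, acts as a catch-all.
 */
#if 0
static struct intel_iommu *example_device_to_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *dmaru = dmar_find_matched_drhd_unit(pdev);

	return dmaru ? dmaru->iommu : NULL;
}
#endif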
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);

	while (((unsigned long)andd) <
	       ((unsigned long)dmar_tbl) + dmar_tbl->length) {
		if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
			acpi_handle h;
			struct acpi_device *adev = NULL;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->object_name,
							  &h)))
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->object_name);
			else {
				acpi_bus_get_device(h, &adev);
				if (!adev)
					pr_err("Failed to get device for ACPI object %s\n",
					       andd->object_name);
				else
					dmar_acpi_insert_dev_scope(andd->device_number,
								   adev);
			}
		}
		andd = ((void *)andd) + andd->header.length;
	}

	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Failed to parse DMAR table\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
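/*
 * Illustrative only, with made-up register offsets: if the capability
 * registers report an IOTLB register block ending at offset 0x1008, the
 * initial VTD_PAGE_SIZE (4KiB) mapping is too small;
 * VTD_PAGE_ALIGN(0x1008) == 0x2000, so map_iommu() above releases the
 * one-page mapping and remaps two pages.
 */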
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
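/*
 * Illustrative only: the invalidation queue is a circular array of
 * QI_LENGTH descriptors.  free_head is where the next submission goes;
 * free_tail trails it, advancing over completed slots:
 *
 *   [ QI_FREE | QI_DONE | QI_DONE | QI_IN_USE | QI_FREE | ... ]
 *               ^free_tail (reclaimed above)   ^free_head
 */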
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
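/*
 * Illustrative only: each qi_submit_sync() call consumes two slots.  The
 * caller's descriptor at 'index' is followed by an Invalidation Wait
 * Descriptor whose Status Write address points at
 * qi->desc_status[wait_index]; hardware stores QI_DONE there once both
 * descriptors have executed, which is what the polling loop above waits
 * for.
 */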
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
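/*
 * Illustrative only -- a hypothetical caller doing a page-selective flush
 * of a 4-page range (size_order = 2) for domain 'did'; addr must be
 * aligned to 1UL << (VTD_PAGE_SHIFT + 2).  DMA_TLB_PSI_FLUSH is the
 * page-selective granularity defined in <linux/intel-iommu.h>.
 */
#if 0
static void example_flush_range(struct intel_iommu *iommu, u16 did, u64 addr)
{
	qi_flush_iotlb(iommu, did, addr, 2, DMA_TLB_PSI_FLUSH);
}
#endif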
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
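/*
 * Illustrative only, with a made-up value: source_id is the PCI requester
 * ID, so source_id == 0x1c08 decodes as bus 0x1c, devfn 0x08, i.e. device
 * 1c:01.0 (PCI_SLOT(0x08) == 1, PCI_FUNC(0x08) == 0).
 */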
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
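/*
 * Illustrative only: layout of one 128-bit primary fault record
 * (PRIMARY_FAULT_REG_LEN == 16 bytes), matching the piecewise reads
 * above:
 *
 *   bytes  0..7  : faulting page address     (dma_frcd_page_addr)
 *   bytes  8..11 : source id in low 16 bits  (dma_frcd_source_id)
 *   bytes 12..15 : F bit, type, fault reason (DMA_FRCD_F, dma_frcd_type,
 *                                             dma_frcd_fault_reason)
 */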
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again.  Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);