/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 * drivers.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data
 * structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
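
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a reader honouring rule 2 above walks dmar_drhd_units under RCU,
 *
 *      rcu_read_lock();
 *      for_each_drhd_unit(dmaru) {
 *              // read-only access to dmaru
 *      }
 *      rcu_read_unlock();
 *
 * while process-context writers, such as the PCI bus notifier further
 * below, serialize with down_write(&dmar_global_lock)/up_write().
 */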

DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * add INCLUDE_ALL at the tail, so that a scan of the list will
         * find it at the very end.
         */
        if (drhd->include_all)
                list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
        else
                list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
        struct acpi_dmar_device_scope *scope;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
        if (*cnt == 0)
                return NULL;

        return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
        int i;
        struct device *tmp_dev;

        if (*devices && *cnt) {
                for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
                        put_device(tmp_dev);
                kfree(*devices);
        }

        *devices = NULL;
        *cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
        int level = 0;
        size_t size;
        struct pci_dev *tmp;
        struct dmar_pci_notify_info *info;

        BUG_ON(dev->is_virtfn);

        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;

        size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
                info = kzalloc(size, GFP_KERNEL);
                if (!info) {
                        pr_warn("Out of memory when allocating notify_info "
                                "for %s.\n", pci_name(dev));
                        if (dmar_dev_scope_status == 0)
                                dmar_dev_scope_status = -ENOMEM;
                        return NULL;
                }
        }

        info->event = event;
        info->dev = dev;
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        level--;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
                                info->bus = tmp->bus->number;
                }
        }

        return info;
}
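
/*
 * Worked example (editor's addition, not part of the original file):
 * for a hot-added endpoint behind one bridge, e.g. 00:1c.0 -> 01:00.0,
 * the first walk up tmp->bus->self above yields level == 2, and the
 * second walk fills path[] from the leaf upwards into descending slots:
 *
 *      path[1] = { .device = 0x00, .function = 0 };    // 01:00.0
 *      path[0] = { .device = 0x1c, .function = 0 };    // 00:1c.0
 *      info->bus = 0;          // set when the root bus is reached
 *
 * so path[] ends up ordered root-to-leaf, matching the ordering of
 * device-scope path entries in the ACPI DMAR table.
 */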

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
        if ((void *)info != dmar_pci_notify_info_buf)
                kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
                                struct acpi_dmar_pci_path *path, int count)
{
        int i;

        if (info->bus != bus)
                return false;
        if (info->level != count)
                return false;

        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
                        return false;
        }

        return true;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                          void *start, void *end, u16 segment,
                          struct dmar_dev_scope *devices,
                          int devices_cnt)
{
        int i, level;
        struct device *tmp, *dev = &info->dev->dev;
        struct acpi_dmar_device_scope *scope;
        struct acpi_dmar_pci_path *path;

        if (segment != info->seg)
                return 0;

        for (; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        continue;

                path = (struct acpi_dmar_pci_path *)(scope + 1);
                level = (scope->length - sizeof(*scope)) / sizeof(*path);
                if (!dmar_match_pci_path(info, scope->bus, path, level))
                        continue;

                if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
                    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
                }

                for_each_dev_scope(devices, devices_cnt, i, tmp)
                        if (tmp == NULL) {
                                devices[i].bus = info->dev->bus->number;
                                devices[i].devfn = info->dev->devfn;
                                rcu_assign_pointer(devices[i].dev,
                                                   get_device(dev));
                                return 1;
                        }
                BUG_ON(i >= devices_cnt);
        }

        return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
                          struct dmar_dev_scope *devices, int count)
{
        int index;
        struct device *tmp;

        if (info->seg != segment)
                return 0;

        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == &info->dev->dev) {
                        rcu_assign_pointer(devices[index].dev, NULL);
                        synchronize_rcu();
                        put_device(tmp);
                        return 1;
                }

        return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        for_each_drhd_unit(dmaru) {
                if (dmaru->include_all)
                        continue;

                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit, header);
                ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                dmaru->segment,
                                dmaru->devices, dmaru->devices_cnt);
                if (ret != 0)
                        break;
        }
        if (ret >= 0)
                ret = dmar_iommu_notify_scope_dev(info);
        if (ret < 0 && dmar_dev_scope_status == 0)
                dmar_dev_scope_status = ret;

        return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
        struct dmar_drhd_unit *dmaru;

        for_each_drhd_unit(dmaru)
                if (dmar_remove_dev_scope(info, dmaru->segment,
                                          dmaru->devices, dmaru->devices_cnt))
                        break;
        dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct pci_dev *pdev = to_pci_dev(data);
        struct dmar_pci_notify_info *info;

        /* Only care about add/remove events for physical functions */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
        if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
                return NOTIFY_DONE;

        info = dmar_alloc_pci_notify_info(pdev, action);
        if (!info)
                return NOTIFY_DONE;

        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);

        dmar_free_pci_notify_info(info);

        return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
        .priority = INT_MIN,
};

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
        dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
                                              ((void *)drhd) + drhd->header.length,
                                              &dmaru->devices_cnt);
        if (dmaru->devices_cnt && dmaru->devices == NULL) {
                kfree(dmaru);
                return -ENOMEM;
        }

        ret = alloc_iommu(dmaru);
        if (ret) {
                dmar_free_dev_scope(&dmaru->devices,
                                    &dmaru->devices_cnt);
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);

        return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
        if (dmaru->devices && dmaru->devices_cnt)
                dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
        if (dmaru->iommu)
                free_iommu(dmaru->iommu);
        kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_andd *andd = (void *)header;

        /* Check for NUL termination within the designated length */
        if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
                WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
                andd->object_name);

        return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN_TAINT(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                        (unsigned long long)rhsa->base_address,
                        rhsa->proximity_domain);
                break;
        case ACPI_DMAR_TYPE_ANDD:
                /* We don't print this here because we need to sanity-check
                   it first. So print it in dmar_parse_one_andd() instead. */
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we could find DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;
        int drhd_count = 0;

        /*
         * Do it again, earlier dmar_tbl mapping could be mapped with
         * fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use DMAR copy
         * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        drhd_count++;
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                        break;
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                case ACPI_DMAR_TYPE_ANDD:
                        ret = dmar_parse_one_andd(entry_header);
                        break;
                default:
                        pr_warn("Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        if (drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
        return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
                                 int cnt, struct pci_dev *dev)
{
        int index;
        struct device *tmp;

        while (dev) {
                for_each_active_dev_scope(devices, cnt, index, tmp)
                        if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        rcu_read_lock();
        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        goto out;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        goto out;
        }
        dmaru = NULL;
out:
        rcu_read_unlock();

        return dmaru;
}

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
                                              struct acpi_device *adev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        struct device *tmp;
        int i;
        struct acpi_dmar_pci_path *path;

        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                for (scope = (void *)(drhd + 1);
                     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
                     scope = ((void *)scope) + scope->length) {
                        if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
                                continue;
                        if (scope->enumeration_id != device_number)
                                continue;

                        path = (void *)(scope + 1);
                        pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
                                dev_name(&adev->dev), dmaru->reg_base_addr,
                                scope->bus, path->device, path->function);
                        for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
                                if (tmp == NULL) {
                                        dmaru->devices[i].bus = scope->bus;
                                        dmaru->devices[i].devfn = PCI_DEVFN(path->device,
                                                                            path->function);
                                        rcu_assign_pointer(dmaru->devices[i].dev,
                                                           get_device(&adev->dev));
                                        return;
                                }
                        BUG_ON(i >= dmaru->devices_cnt);
                }
        }
        pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
                device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
        struct acpi_dmar_andd *andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);

        while (((unsigned long)andd) <
               ((unsigned long)dmar_tbl) + dmar_tbl->length) {
                if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
                        acpi_handle h;
                        struct acpi_device *adev = NULL;

                        if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
                                                          andd->object_name,
                                                          &h))) {
                                pr_err("Failed to find handle for ACPI object %s\n",
                                       andd->object_name);
                        } else {
                                acpi_bus_get_device(h, &adev);
                                if (!adev)
                                        pr_err("Failed to get device for ACPI object %s\n",
                                               andd->object_name);
                                else
                                        dmar_acpi_insert_dev_scope(andd->device_number,
                                                                   adev);
                        }
                }
                andd = ((void *)andd) + andd->header.length;
        }

        return 0;
}

int __init dmar_dev_scope_init(void)
{
        struct pci_dev *dev = NULL;
        struct dmar_pci_notify_info *info;

        if (dmar_dev_scope_status != 1)
                return dmar_dev_scope_status;

        dmar_acpi_dev_scope_init();

        if (list_empty(&dmar_drhd_units)) {
                dmar_dev_scope_status = -ENODEV;
        } else {
                dmar_dev_scope_status = 0;

                for_each_pci_dev(dev) {
                        if (dev->is_virtfn)
                                continue;

                        info = dmar_alloc_pci_notify_info(dev,
                                        BUS_NOTIFY_ADD_DEVICE);
                        if (!info) {
                                return dmar_dev_scope_status;
                        } else {
                                dmar_pci_bus_add_dev(info);
                                dmar_free_pci_notify_info(info);
                        }
                }

                bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
        }

        return dmar_dev_scope_status;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized == 0) {
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
                                pr_info("parse DMAR table failure.\n");
                } else if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
                }

                if (ret < 0)
                        dmar_table_initialized = ret;
                else
                        dmar_table_initialized = 1;
        }

        return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        void __iomem *addr;
                        u64 cap, ecap;

                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                warn_invalid_dmar(0, "");
                                goto failed;
                        }

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                        if (!addr) {
                                printk("IOMMU: can't validate: %llx\n", drhd->address);
                                goto failed;
                        }
                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                warn_invalid_dmar(drhd->address,
                                                  " returns all ones");
                                goto failed;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;

failed:
        return 0;
}

int __init detect_intel_iommu(void)
{
        int ret;

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();

        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

        early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
        up_write(&dmar_global_lock);

        return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("IOMMU: can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("IOMMU: can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("IOMMU: can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("IOMMU: can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}
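
/*
 * Editor's note (illustrative, not part of the original file; the offset
 * value below is hypothetical): with a 4K VTD_PAGE_SIZE, a unit reporting
 * ecap_max_iotlb_offset() == 0x1008 gives
 *
 *      map_size = VTD_PAGE_ALIGN(0x1008) == 0x2000
 *
 * so map_iommu() drops the initial one-page mapping and remaps two pages,
 * which is why the request_mem_region()/ioremap() pair appears twice in
 * the function above.
 */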

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("IOMMU: failed to map %s\n", iommu->name);
                goto error;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
        iommu->segment = drhd->segment;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->seq_id,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

err_unmap:
        unmap_iommu(iommu);
error:
        kfree(iommu);
        return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
        if (iommu->irq) {
                free_irq(iommu->irq, iommu);
                irq_set_handler_data(iommu->irq, NULL);
                destroy_irq(iommu->irq);
        }

        if (iommu->qi) {
                free_page((unsigned long)iommu->qi->desc);
                kfree(iommu->qi->desc_status);
                kfree(iommu->qi);
        }

        if (iommu->reg)
                unmap_iommu(iommu);

        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}
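
/*
 * Editor's note (illustrative, not part of the original file): with
 * QI_LENGTH == 256 this is a classic ring reclaim. If the slots at
 * free_tail, free_tail+1 and free_tail+2 hold QI_DONE, QI_ABORT and
 * QI_IN_USE respectively, the loop frees the first two, advances
 * free_tail by two (mod QI_LENGTH) and stops at the slot still in use.
 */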

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: "
                                "low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent the
                 * interrupt context from queueing another cmd while a cmd
                 * is already submitted and waiting for completion on this
                 * cpu. This is to avoid a deadlock where the interrupt
                 * context can wait indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}
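
/*
 * Editor's note (illustrative, not part of the original file): each
 * submission consumes two consecutive ring slots. With free_head == 10,
 * the caller's descriptor lands in hw[10] and the generated wait
 * descriptor in hw[11]; hardware's status write of QI_DONE into
 * desc_status[11] is what the polling loop above observes, after which
 * free_head has already moved on to 12.
 */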

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;
        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}
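
/*
 * Editor's note (illustrative, not part of the original file): a typical
 * caller, e.g. the flush hooks in intel-iommu.c, requests a global
 * IOTLB flush as
 *
 *      qi_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 *
 * a domain-selective flush passes the domain id with DMA_TLB_DSI_FLUSH,
 * and a page-selective flush additionally passes the address and size
 * order with DMA_TLB_PSI_FLUSH.
 */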

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DEV_IOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already setup and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
        "PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}
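
/*
 * Editor's note (illustrative, not part of the original file): hardware
 * fault reasons 0x0..0xD index dma_remap_fault_reasons[] directly, while
 * interrupt-remapping reasons start at 0x20; e.g. fault_reason 0x22
 * resolves to irq_remap_fault_reasons[2], "Present field in the IRTE
 * entry is clear", with *fault_type set to INTR_REMAP.
 */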

void dmar_msi_unmask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        unsigned long flag;

        /* unmask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        unsigned long flag;

        /* mask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                       "INTR-REMAP:[fault reason %02d] %s\n",
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else
                pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                pr_err("DRHD: handling fault status reg %x\n", fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto unlock_exit;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                  source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
        }

        writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}
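
/*
 * Editor's note (illustrative, not part of the original file): the loop
 * above decodes one 128-bit fault record per iteration. Each record is
 * PRIMARY_FAULT_REG_LEN (16) bytes: the faulting page address in bytes
 * 0-7, the source id in bytes 8-11, and the fault/type/reason bits in
 * bytes 12-15, which is why the reads target offsets +0, +8 and +12.
 */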

int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                pr_err("IOMMU: no free vectors\n");
                return -EINVAL;
        }

        irq_set_handler_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                irq_set_handler_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                pr_err("IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /*
         * Enable fault control interrupt.
         */
        for_each_iommu(iommu, drhd) {
                u32 fault_status;
                int ret = dmar_set_interrupt(iommu);

                if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
                fault_status = readl(iommu->reg + DMAR_FSTS_REG);
                writel(fault_status, iommu->reg + DMAR_FSTS_REG);
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable
         * queued invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
        struct dmar_drhd_unit *dmaru, *dmaru_n;

        /* DMAR units are in use */
        if (irq_remapping_enabled || intel_iommu_enabled)
                return 0;

        if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
                bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

        down_write(&dmar_global_lock);
        list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
                list_del(&dmaru->list);
                dmar_free_drhd(dmaru);
        }
        up_write(&dmar_global_lock);

        return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);