/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};
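/*
 * Illustrative sketch (not part of the driver): handlers in this table are
 * indexed by ACPI structure type, so a hypothetical caller that only counts
 * DRHD entries could hand dmar_walk_remapping_entries() a table like the
 * following; example_drhd_cb and example_count are made-up names.
 *
 *	static int example_drhd_cb(struct acpi_dmar_header *hdr, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	static int example_count;
 *	static struct dmar_res_callback example_cb = {
 *		.ignore_unhandled = true,
 *		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &example_drhd_cb,
 *		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &example_count,
 *	};
 */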
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
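/*
 * A minimal sketch of the two rules above (assumed usage, not a definitive
 * recipe): updaters take dmar_global_lock around the RCU list primitives,
 * while interrupt-context readers use plain RCU.
 *
 *	// process context: mutate the DRHD list
 *	down_write(&dmar_global_lock);
 *	list_add_rcu(&dmaru->list, &dmar_drhd_units);
 *	up_write(&dmar_global_lock);
 *
 *	// interrupt context: read-only traversal
 *	rcu_read_lock();
 *	for_each_drhd_unit(dmaru)
 *		do_something(dmaru);	// do_something() is hypothetical
 *	rcu_read_unlock();
 */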
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}
/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info "
				"for %s.\n", pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}
static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

	if (arg)
		(*(int *)arg)++;

	return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "record passes table end\n");
			ret = -EINVAL;
			break;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}
static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * that SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
		return -EINVAL;
	}
	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);
	early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/*
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}
static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("IOMMU: failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;

	if (intel_iommu_enabled)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		dmar_free_hwirq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
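/*
 * Worked example of the wrap-around above: with a queue of QI_LENGTH slots
 * and free_tail sitting at QI_LENGTH - 1, reclaiming one more descriptor
 * advances free_tail to (QI_LENGTH - 1 + 1) % QI_LENGTH == 0, so the free
 * list cycles back to slot 0 instead of running off the end of
 * desc_status[].
 */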
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);
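		/*
		 * Note on the "head - 2" step above: qi_submit_sync() always
		 * queues descriptors in (work, wait) pairs and advances
		 * free_head by 2, so stepping backwards two slots at a time
		 * visits the wait descriptor of each submitted pair.
		 */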
		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
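	/*
	 * DMAR_IQ_SHIFT in the write above turns a descriptor index into the
	 * byte offset the queue registers expect: each qi_desc is 16 bytes,
	 * so index << DMAR_IQ_SHIFT (i.e. index * 16) is the offset of that
	 * slot in the ring.
	 */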
	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
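		/*
		 * Example of the size encoding above, assuming
		 * VTD_PAGE_SHIFT == 12: for mask == 2 (a 16KiB range) the
		 * BUG_ON() checks 16KiB alignment and the OR sets bits 0-12,
		 * so bit 12 of the encoded address reads as 1 and bit 13 as
		 * 0; the position of the lowest zero bit at or above bit 12
		 * tells the hardware how many pages the invalidation covers.
		 */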
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
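/*
 * Example of the split above: reason code 0x21 lands in the
 * interrupt-remapping range and maps to irq_remap_fault_reasons[1]
 * ("Interrupt index exceeded the interrupt-remapping table size"), while
 * reason code 0x02 maps to dma_remap_fault_reasons[2]
 * ("Present bit in context entry is clear").
 */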
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
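/*
 * Each primary fault record is 16 bytes, matching the accesses below: a
 * 64-bit read at offset 0 returns the faulting page address, the dword at
 * offset 8 carries the source-id, and the dword at offset 12 carries the
 * F bit, the fault reason and the request type.
 */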
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq();
	if (irq <= 0) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		dmar_free_hwirq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);