/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
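/*
 * A device scope entry names a start bus plus a chain of
 * acpi_dmar_pci_path hops (one {device, function} pair per bridge)
 * leading down to the target device. The parser below walks that chain
 * to resolve the entry to a pci_dev.
 */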
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
}
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	int index, ret;

	*devices = dmar_alloc_dev_scope(start, end, cnt);
	if (*cnt == 0)
		return 0;
	else if (!*devices)
		return -ENOMEM;

	for (index = 0; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
	}

	return 0;
}
void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	return dmar_parse_dev_scope((void *)(drhd + 1),
				    ((void *)drhd) + drhd->header.length,
				    &dmaru->devices_cnt, &dmaru->devices,
				    drhd->segment);
}
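/*
 * An ACPI RHSA structure ties a DRHD unit (identified by its register
 * base address) to a proximity domain, i.e. a NUMA node. Recording the
 * node on the iommu lets later allocations (such as the QI descriptor
 * page in dmar_enable_qi()) come from node-local memory.
 */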
#ifdef CONFIG_ACPI_NUMA
static int __init dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
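/*
 * DMAR table layout, per the VT-d specification: a fixed acpi_table_dmar
 * header (host address width, flags) followed by a sequence of
 * variable-length remapping structures, each beginning with an
 * acpi_dmar_header that gives its type and length:
 *
 *	[acpi_table_dmar][DRHD]...[RMRR]...[ATSR][RHSA]...
 *
 * parse_dmar_table() below steps through that sequence by header->length.
 */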
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}
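/*
 * Ordering note: dmar_table_init() must have parsed the DMAR table and
 * allocated the DRHD units before dmar_dev_scope_init() can resolve
 * device scopes to pci_dev pointers. Both cache their result in a
 * static flag, so repeated calls are cheap and idempotent.
 */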
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}
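/*
 * detect_intel_iommu() runs very early (wired up via IOMMU_INIT_POST at
 * the bottom of this file), so the DMAR table is only temporarily
 * mapped here and unmapped again above; parse_dmar_table() re-runs
 * dmar_table_detect() later, once regular ACPI mappings are available.
 */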
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
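/*
 * The invalidation queue is a ring of QI_LENGTH descriptors indexed by
 * free_head (next slot to fill) and free_tail (oldest slot not yet
 * reclaimed). qi_submit_sync() always consumes two slots, the real
 * descriptor plus a wait descriptor whose status write signals
 * completion, which is why indices advance and retreat in steps of two
 * in the code below.
 */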
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
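/*
 * Illustrative call sequence (mirroring qi_global_iec() just below): a
 * caller fills in a 128-bit qi_desc and submits it synchronously:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */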
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
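/*
 * Note on the size encoding above: for a non-zero mask the address must
 * be aligned to 2^(VTD_PAGE_SHIFT + mask), which the BUG_ON enforces,
 * and setting the low-order bits below that alignment to 1 tells the
 * hardware how many pages the device-IOTLB invalidation covers.
 */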
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long)qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
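/*
 * Typical bring-up order: dmar_enable_qi() allocates the descriptor
 * page and status array and turns the interface on via
 * __dmar_enable_qi(); only after that may qi_submit_sync() and the
 * qi_flush_*() helpers above be used on this iommu.
 */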
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
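/*
 * The 0x20 offset mirrors the VT-d fault-reason encoding: reasons 0x20
 * and up are interrupt-remapping faults, while the low range indexes
 * the DMA-remapping table above.
 */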
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
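/*
 * Each primary fault record is 16 bytes (PRIMARY_FAULT_REG_LEN): a
 * 64-bit faulting page address, then a dword holding the source id, and
 * a final dword carrying the fault reason, request type and the F
 * (fault) bit; hence the reads above at offsets 0, 8 and 12.
 */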
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);