/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */
#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
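/*
 * Illustrative note, not from the original source: ~0xFFFUL has every
 * bit above bit 11 set, i.e. all page sizes that are a power-of-two
 * multiple of 4KiB (4KiB, 8KiB, ..., 2MiB, 1GiB, ...), which is exactly
 * the "any order of 4KiB" behavior described above.
 */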
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
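/*
 * Worked example, illustrative and not from the original source:
 * width_to_agaw(48) = DIV_ROUND_UP(48 - 30, 9) = 2, and agaw_to_level(2)
 * = 4, so a 48-bit domain walks a 4-level table. level_to_offset_bits(2)
 * = 9, so pfn_level_offset() indexes with bits 9-17 of the DMA PFN at
 * level 2, where level_size(2) = 512 pages = 2MiB per entry.
 */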
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
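/*
 * Illustrative note, not from the original source: on x86 PAGE_SHIFT and
 * VTD_PAGE_SHIFT are both 12, so these shifts are by zero today; the
 * helpers keep the arithmetic correct should MM pages ever be larger
 * than VT-d pages.
 */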
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
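/*
 * Illustrative example, not from the original source: a 4KiB read/write
 * mapping of host physical address 0x12345000 is encoded as
 * pte->val = 0x12345000 | DMA_PTE_READ | DMA_PTE_WRITE = 0x12345003.
 */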
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)
struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations — the ones with
 * PASID support on bit 28 — have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

/* Convert generic 'struct iommu_domain' to private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}
static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
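/*
 * Illustrative example, not from the original source: for agaw 2,
 * agaw_to_width() returns 48, so addr_width = 48 - 12 = 36 and any DMA
 * PFN with bits set above bit 35 is reported as unsupported.
 */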
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}
static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}
static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}
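/*
 * Illustrative note, not from the original source: with ECS each
 * 128-bit root entry still covers one bus, but the low half addresses
 * devfns 0x00-0x7f and the high half devfns 0x80-0xff; devfn is doubled
 * because extended context entries are 256 bits, twice the plain size.
 */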
static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
out:
	rcu_read_unlock();

	return iommu;
}
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}
/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}
static void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		pr_err("Flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("TLB flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
			 u8 bus, u8 devfn)
{
	bool found = false;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			found = true;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}
*info
)
1319 if (!info
|| !dev_is_pci(info
->dev
))
1322 pci_enable_ats(to_pci_dev(info
->dev
), VTD_PAGE_SHIFT
);
1325 static void iommu_disable_dev_iotlb(struct device_domain_info
*info
)
1327 if (!info
->dev
|| !dev_is_pci(info
->dev
) ||
1328 !pci_ats_enabled(to_pci_dev(info
->dev
)))
1331 pci_disable_ats(to_pci_dev(info
->dev
));
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;
		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
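/*
 * Worked example, illustrative and not from the original source:
 * flushing 5 pages gives mask = ilog2(__roundup_pow_of_two(5)) =
 * ilog2(8) = 3, i.e. a PSI covering 2^3 = 8 naturally aligned pages;
 * over-invalidation is harmless, under-invalidation would not be.
 */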
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("%s: Number of Domains supported <%ld>\n",
		 iommu->name, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("%s: Allocating domain id array failed\n",
		       iommu->name);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("%s: Allocating domain array failed\n",
		       iommu->name);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);
			if (domain_detach_iommu(domain, iommu) == 0 &&
			    !domain_type_is_vm(domain))
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}
static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(int flags)
{
	/* domain id for virtual machine, it won't be set in context */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
		domain->id = atomic_inc_return(&vm_domid);

	return domain;
}
static int __iommu_attach_domain(struct dmar_domain *domain,
				 struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num < ndomains) {
		set_bit(num, iommu->domain_ids);
		iommu->domains[num] = domain;
	} else {
		num = -ENOSPC;
	}

	return num;
}
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	num = __iommu_attach_domain(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (num < 0)
		pr_err("%s: No free domain ids\n", iommu->name);

	return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
				  struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains)
		if (iommu->domains[num] == domain)
			return num;

	return __iommu_attach_domain(domain, iommu);
}
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	if (domain_type_is_vm_or_si(domain)) {
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				clear_bit(num, iommu->domain_ids);
				iommu->domains[num] = NULL;
				break;
			}
		}
	} else {
		clear_bit(domain->id, iommu->domain_ids);
		iommu->domains[domain->id] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void domain_attach_iommu(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;
	int count = INT_MAX;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
		count = --domain->iommu_count;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	return count;
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		pr_err("Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				pr_err("Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
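/*
 * Worked example, illustrative and not from the original source: for a
 * 40-bit guest width, r = (40 - 12) % 9 = 1, so the width is rounded up
 * to 40 + 9 - 1 = 48 bits, the next width a 9-bit-per-level walk covers.
 */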
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("Hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	struct page *freelist = NULL;
	int i;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
		iommu_detach_domain(domain, g_iommus[i]);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 1);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain_type_is_vm_or_si(domain)) {
		if (domain_type_is_vm(domain)) {
			id = iommu_attach_vm_domain(domain, iommu);
			if (id < 0) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				pr_err("%s: No free domain ids\n", iommu->name);
				return -EFAULT;
			}
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	domain_attach_iommu(domain, iommu);

	return 0;
}
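/*
 * Illustrative summary, not from the original source: for an ordinary
 * device the context entry ends up as {present, fault processing
 * enabled, translation type MULTI_LEVEL or DEV_IOTLB, ASR = page table
 * root, AW = domain agaw, DID = domain id}, followed by a context-cache
 * and IOTLB invalidation (caching mode) or a write-buffer flush.
 */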
struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff,
					  data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn,
						  translation);

	data.domain = domain;
	data.iommu = iommu;
	data.translation = translation;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}
static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
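/*
 * Worked example, illustrative and not from the original source: a
 * 100-byte buffer at page offset 0xffc crosses a 4KiB boundary, so
 * aligned_nrpages(0xffc, 100) = PAGE_ALIGN(0x1060) >> 12 = 2 VT-d pages.
 */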
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
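/*
 * Worked example, illustrative and not from the original source: if
 * iov_pfn and phy_pfn are both 2MiB aligned (low 9 bits clear), at
 * least 512 pages remain, and the domain supports one superpage level,
 * this returns 2 and the caller can emit a single 2MiB PTE instead of
 * 512 4KiB PTEs.
 */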
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is large page*/
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
				iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain_type_is_vm(domain)) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			domain_detach_iommu(domain, info->iommu);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: struct device->archdata.iommu stores the info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}
static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}
static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
	struct device_domain_info *info;
	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, use it */
		if (info)
			goto found_domain;
	}

	/* Allocate and initialize new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	domain->id = iommu_attach_domain(domain, iommu);
	if (domain->id < 0) {
		free_domain_mem(domain);
		return NULL;
	}
	domain_attach_iommu(domain, iommu);
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/* register PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

		if (!domain)
			return NULL;
	}

found_domain:
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}

	return domain;
}
static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
2316 static int iommu_domain_identity_map(struct dmar_domain
*domain
,
2317 unsigned long long start
,
2318 unsigned long long end
)
2320 unsigned long first_vpfn
= start
>> VTD_PAGE_SHIFT
;
2321 unsigned long last_vpfn
= end
>> VTD_PAGE_SHIFT
;
2323 if (!reserve_iova(&domain
->iovad
, dma_to_mm_pfn(first_vpfn
),
2324 dma_to_mm_pfn(last_vpfn
))) {
2325 pr_err("Reserving iova failed\n");
2329 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2330 start
, end
, domain
->id
);
2332 * RMRR range might have overlap with physical memory range,
2335 dma_pte_clear_range(domain
, first_vpfn
, last_vpfn
);
2337 return domain_pfn_mapping(domain
, first_vpfn
, first_vpfn
,
2338 last_vpfn
- first_vpfn
+ 1,
2339 DMA_PTE_READ
|DMA_PTE_WRITE
);
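
/*
 * Editor's sketch (illustrative only, not part of the driver): the PFN
 * arithmetic used above, in self-contained form.  VTD_PAGE_SHIFT_X (12)
 * is a stand-in for the driver's VTD_PAGE_SHIFT; an identity map covers
 * last_vpfn - first_vpfn + 1 pages starting at first_vpfn.
 *
 * #include <stdio.h>
 *
 * #define VTD_PAGE_SHIFT_X 12   /\* stand-in for VTD_PAGE_SHIFT (4KiB) *\/
 *
 * int main(void)
 * {
 *	unsigned long long start = 0xe8000000ULL;  /\* hypothetical RMRR base  *\/
 *	unsigned long long end   = 0xe80fffffULL;  /\* hypothetical RMRR limit *\/
 *	unsigned long long first_vpfn = start >> VTD_PAGE_SHIFT_X;
 *	unsigned long long last_vpfn  = end   >> VTD_PAGE_SHIFT_X;
 *
 *	/\* 256 pages: pfn 0xe8000 .. 0xe80ff, mapped read+write 1:1 *\/
 *	printf("map %llu pages, pfn %#llx..%#llx\n",
 *	       last_vpfn - first_vpfn + 1, first_vpfn, last_vpfn);
 *	return 0;
 * }
 */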
static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set up in
	   si_domain to start with */
	if (domain == si_domain && hw_pass_through) {
		pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
			dev_name(dev), start, end);
		return 0;
	}

	pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	pr_info("Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("Identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}

/*
 * There are a couple cases where we need to restrict the functionality of
 * devices associated with RMRRs.  The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost.  This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API.  This interface
 * expects to have full control of the IOVA space for the device.  We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space.  We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
			return false;
	}

	return true;
}
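
/*
 * Editor's sketch (illustrative only): the RMRR policy above restated as a
 * standalone truth table -- a device with an RMRR is locked out of the IOMMU
 * API unless it is a USB or graphics device.
 *
 * #include <stdbool.h>
 * #include <stdio.h>
 *
 * static bool rmrr_locked(bool has_rmrr, bool is_usb, bool is_gfx)
 * {
 *	if (!has_rmrr)
 *		return false;
 *	return !(is_usb || is_gfx);
 * }
 *
 * int main(void)
 * {
 *	printf("NIC with RMRR locked: %d\n", rmrr_locked(true, false, false));
 *	printf("USB with RMRR locked: %d\n", rmrr_locked(true, true, false));
 *	return 0;
 * }
 */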
static int iommu_should_identity_map(struct device *dev, int startup)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (device_is_rmrr_locked(dev))
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}
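
/*
 * Editor's sketch (illustrative only): the capability test above, stated on
 * its own.  A device qualifies for the 1:1 domain only if the stricter of
 * its streaming and coherent DMA masks covers all RAM the platform may hand
 * it (dma_get_required_mask() in the real driver).
 *
 * #include <stdbool.h>
 * #include <stdint.h>
 * #include <stdio.h>
 *
 * static bool can_identity_map(uint64_t dma_mask, uint64_t coherent_mask,
 *			        uint64_t required_mask)
 * {
 *	if (coherent_mask && coherent_mask < dma_mask)
 *		dma_mask = coherent_mask;
 *	return dma_mask >= required_mask;
 * }
 *
 * int main(void)
 * {
 *	/\* 32-bit-only device on a >4GiB box: not eligible -> prints 0 *\/
 *	printf("%d\n", can_identity_map(0xffffffffULL, 0, 0xfffffffffULL));
 *	return 0;
 * }
 */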
static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev,
				  hw ? CONTEXT_TT_PASS_THROUGH :
				       CONTEXT_TT_MULTI_LEVEL);
	if (!ret)
		pr_info("%s identity mapping for device %s\n",
			hw ? "Hardware" : "Software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;

			adev = to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}

static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
	/*
	 * Start from a sane IOMMU hardware state.
	 * If queued invalidation has already been initialized by us
	 * (for example, while enabling interrupt remapping), then
	 * things are already rolling from a sane state.
	 */
	if (!iommu->qi) {
		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	if (dmar_enable_qi(iommu)) {
		/*
		 * Queued Invalidate not enabled, use Register Based Invalidate
		 */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
		pr_info("%s: Using Register based invalidation\n",
			iommu->name);
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
		pr_info("%s: Using Queued invalidation\n", iommu->name);
	}
}

static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct device *dev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read-only
		 */
		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
	}

	/* Preallocate enough resources for IOMMU hot-addition */
	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
		g_num_of_iommus = DMAR_UNITS_SUPPORTED;

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		pr_err("Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		intel_iommu_init_qi(iommu);

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs; needs to be split out later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret)
			goto free_iommu;

		iommu_flush_write_buffer(iommu);
		iommu_set_root_entry(iommu);
		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass-through is not set or not enabled, set up context entries
	 * for identity mappings for RMRR, GFX and ISA, and possibly fall
	 * back to static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			pr_crit("Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	pr_info("Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		/* some BIOSes list non-existent devices in the DMAR table */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, dev) {
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
			if (ret)
				pr_err("Mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd) {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from the higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		pr_err("Allocating %ld-page iova for %s failed",
		       nrpages, dev_name(dev));
		return NULL;
	}

	return iova;
}
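
/*
 * Editor's sketch (illustrative, hypothetical names suffixed _X): the
 * allocation policy above.  The device mask is first clamped to what the
 * domain's address width can express; a sub-4GiB range is then preferred
 * before falling back to the full mask.
 *
 * #include <stdio.h>
 * #include <stdint.h>
 *
 * /\* stand-in for DMA_BIT_MASK()/DOMAIN_MAX_ADDR(); gaw = domain width *\/
 * #define BIT_MASK_X(n) (((n) >= 64) ? ~0ULL : ((1ULL << (n)) - 1))
 *
 * int main(void)
 * {
 *	int gaw = 39;                          /\* hypothetical domain width *\/
 *	uint64_t dev_mask = BIT_MASK_X(64);    /\* device claims 64-bit DMA  *\/
 *	uint64_t dma_mask = dev_mask < BIT_MASK_X(gaw) ? dev_mask
 *						       : BIT_MASK_X(gaw);
 *
 *	/\* allocation order: below 4GiB first, then anywhere under dma_mask *\/
 *	printf("clamped mask %#llx\n", (unsigned long long)dma_mask);
 *	return 0;
 * }
 */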
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		pr_err("Allocating domain for %s failed\n",
		       dev_name(dev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(dev))) {
		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			pr_err("Domain context map for %s failed\n",
			       dev_name(dev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

/* Check if the dev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	int found;

	if (iommu_dummy(dev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(dev);
	if (found) {
		if (iommu_should_identity_map(dev, 0))
			return 1;
		/*
		 * A 32-bit DMA device is removed from si_domain and we
		 * fall back to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, dev);
		pr_info("32bit %s uses non-identity mapping\n",
			dev_name(dev));
		return 0;
	} else {
		/*
		 * If a 64-bit DMA device is detached from a VM, the device
		 * is put back into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(dev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, dev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				pr_info("64bit %s uses identity mapping\n",
					dev_name(dev));
				return 1;
			}
		}
	}

	return 0;
}

static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might span a partial page, so map the whole
	 * page.  Note: if two parts of one page are mapped separately, we
	 * might have two guest addresses mapping to the same host paddr, but
	 * this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
	       dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}
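
/*
 * Editor's sketch (illustrative only): how the returned handle above keeps
 * the sub-page offset.  Mapping is page-granular, so the low bits of the
 * physical address are simply re-applied to the first IOVA page.
 *
 * #include <stdio.h>
 *
 * #define PAGE_SHIFT_X 12
 * #define PAGE_MASK_X  (~((1UL << PAGE_SHIFT_X) - 1))
 *
 * int main(void)
 * {
 *	unsigned long paddr    = 0x12345678UL;  /\* hypothetical buffer     *\/
 *	unsigned long iova_pfn = 0xffff0UL;     /\* hypothetical allocation *\/
 *	unsigned long dma_addr = (iova_pfn << PAGE_SHIFT_X) |
 *				 (paddr & ~PAGE_MASK_X);
 *
 *	printf("dma handle = %#lx\n", dma_addr);  /\* 0xffff0678 *\/
 *	return 0;
 * }
 */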
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}

static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova_size(iova),
					!deferred_flush[i].freelist[j], 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
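
/*
 * Editor's sketch (illustrative, hypothetical types): the batching scheme
 * above.  Unmaps are parked per-IOMMU; a flush is forced either when the
 * table hits its high-water mark or when the 10 ms timer fires, trading a
 * little IOVA-reuse latency for far fewer IOTLB invalidations.
 *
 * #define HIGH_WATER_MARK_X 250
 *
 * struct pending_unmap {
 *	void *iova[HIGH_WATER_MARK_X];
 *	int next;
 * };
 *
 * /\* returns nonzero when the caller must flush before queueing more *\/
 * static int queue_unmap(struct pending_unmap *t, void *iova)
 * {
 *	t->iova[t->next++] = iova;
 *	return t->next == HIGH_WATER_MARK_X;
 * }
 */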
static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	intel_unmap(dev, dev_addr);
}

static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	struct page *page = NULL;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(dev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
					 DMA_BIDIRECTIONAL,
					 dev->coherent_dma_mask);
	if (*dma_handle)
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

	return NULL;
}

static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;
	struct page *page = virt_to_page(vaddr);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap(dev, dma_handle);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
}

static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	intel_unmap(dev, sglist[0].dma_address);
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
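
/*
 * Editor's sketch (illustrative only): the sizing pass above.  Each segment
 * contributes enough pages to cover its offset plus length, roughly what
 * aligned_nrpages() computes in the driver.
 *
 * #include <stdio.h>
 * #include <stddef.h>
 *
 * #define PAGE_SHIFT_X 12
 * #define PAGE_SIZE_X  (1UL << PAGE_SHIFT_X)
 *
 * static unsigned long aligned_nrpages_x(unsigned long offset, size_t len)
 * {
 *	return (offset + len + PAGE_SIZE_X - 1) >> PAGE_SHIFT_X;
 * }
 *
 * int main(void)
 * {
 *	/\* hypothetical two-segment list: 0x10 bytes at offset 0xff0,
 *	   then 0x2000 bytes at offset 0 *\/
 *	unsigned long total = aligned_nrpages_x(0xff0, 0x10) +
 *			      aligned_nrpages_x(0, 0x2000);
 *
 *	printf("iova pages needed: %lu\n", total);  /\* 1 + 2 = 3 *\/
 *	return 0;
 * }
 */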
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		pr_err("Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_err("Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	iommu_iova_cache_destroy();

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	iommu_iova_cache_destroy();
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}
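
/*
 * Editor's sketch (illustrative, hypothetical register block): the
 * suspend/resume pattern above.  The four fault-event registers are the
 * only state saved; everything else is rebuilt by init_iommu_hw().
 *
 * #include <stdint.h>
 *
 * enum { FECTL_X, FEDATA_X, FEADDR_X, FEUADDR_X, NR_SAVED_X };
 *
 * static void save_fault_regs(const volatile uint32_t *regs, uint32_t *state)
 * {
 *	for (int i = 0; i < NR_SAVED_X; i++)
 *		state[i] = regs[i];        /\* readl() in the driver  *\/
 * }
 *
 * static void restore_fault_regs(volatile uint32_t *regs, const uint32_t *state)
 * {
 *	for (int i = 0; i < NR_SAVED_X; i++)
 *		regs[i] = state[i];        /\* writel() in the driver *\/
 * }
 *
 * int main(void)
 * {
 *	uint32_t regs[NR_SAVED_X] = { 0x1, 0x2, 0x3, 0x4 };
 *	uint32_t state[NR_SAVED_X];
 *
 *	save_fault_regs(regs, state);     /\* suspend path *\/
 *	restore_fault_regs(regs, state);  /\* resume path  *\/
 *	return 0;
 * }
 */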
static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}

static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by the ACPI _DSM method, we need
	 * to copy the memory content because the memory buffer will be freed
	 * on exit.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}
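
/*
 * Editor's sketch (illustrative only): the copy-on-parse pattern above.
 * The unit struct and the header it points at are carved from a single
 * allocation, so the cached header survives the caller freeing its buffer.
 *
 * #include <stdlib.h>
 * #include <string.h>
 *
 * struct hdr_x { unsigned short type, length; };
 * struct unit_x { struct hdr_x *hdr; /\* ...bookkeeping... *\/ };
 *
 * static struct unit_x *cache_header(const struct hdr_x *hdr)
 * {
 *	struct unit_x *u = calloc(1, sizeof(*u) + hdr->length);
 *
 *	if (!u)
 *		return NULL;
 *	u->hdr = (struct hdr_x *)(u + 1);   /\* storage right after the struct *\/
 *	memcpy(u->hdr, hdr, hdr->length);
 *	return u;
 * }
 */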
static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}

static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("%s: Doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("%s: Doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("%s: Doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * we always have to disable PMRs or DMA may fail on this device
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	if (si_domain) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0 || si_domain->id != ret)
			goto disable_iommu;
		domain_attach_iommu(si_domain, iommu);
	}

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * Here we only respond to a device being unbound from its driver.
 *
 * A newly added device is not attached to its DMAR domain here yet. That
 * happens when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("Failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("Failed to get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
						iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo, iova_size(iova),
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}
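
/*
 * Editor's sketch (illustrative, hypothetical names): the notifier contract
 * above.  New memory is identity-mapped before it goes online; on offline
 * (or a cancelled online) the same range is unmapped and its IOVA returned.
 *
 * enum mem_event_x { GOING_ONLINE_X, OFFLINE_X, CANCEL_ONLINE_X };
 *
 * static int si_domain_sync(enum mem_event_x ev,
 *			     int (*map)(unsigned long, unsigned long),
 *			     void (*unmap)(unsigned long, unsigned long),
 *			     unsigned long start_pfn, unsigned long nr_pages)
 * {
 *	switch (ev) {
 *	case GOING_ONLINE_X:
 *		return map(start_pfn, start_pfn + nr_pages - 1);
 *	case OFFLINE_X:
 *	case CANCEL_ONLINE_X:
 *		unmap(start_pfn, start_pfn + nr_pages - 1);
 *		return 0;
 *	}
 *	return 0;
 * }
 */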
static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		pr_info("No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		pr_info("No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		pr_err("Initialization failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	bool found = false;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned by
		 * this domain, clear this iommu in iommu_bmp and update the
		 * iommu count and coherency.
		 */
		if (info->iommu == iommu)
			found = true;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			 DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
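
/*
 * Editor's sketch (illustrative only; bit values are hypothetical
 * stand-ins): the permission translation above.  SNP is only legal when
 * every IOMMU backing the domain advertises snoop control.
 *
 * #include <stdbool.h>
 *
 * #define IOMMU_READ_X   (1 << 0)
 * #define IOMMU_WRITE_X  (1 << 1)
 * #define IOMMU_CACHE_X  (1 << 2)
 *
 * #define PTE_READ_X     (1 << 0)
 * #define PTE_WRITE_X    (1 << 1)
 * #define PTE_SNP_X      (1 << 11)
 *
 * static int prot_to_pte(int iommu_prot, bool snooping)
 * {
 *	int prot = 0;
 *
 *	if (iommu_prot & IOMMU_READ_X)
 *		prot |= PTE_READ_X;
 *	if (iommu_prot & IOMMU_WRITE_X)
 *		prot |= PTE_WRITE_X;
 *	if ((iommu_prot & IOMMU_CACHE_X) && snooping)
 *		prot |= PTE_SNP_X;
 *	return prot;
 * }
 */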
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}
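
/*
 * Editor's sketch (illustrative only): the large-page rounding above.  At
 * level 1 a PTE spans 4KiB, at level 2 2MiB, at level 3 1GiB; an unmap that
 * lands inside a superpage is widened to that span.
 *
 * #include <stdio.h>
 * #include <stddef.h>
 *
 * #define VTD_PAGE_SHIFT_X 12
 *
 * static size_t level_span(int level)
 * {
 *	/\* 9 address bits per level, as in level_to_offset_bits() *\/
 *	return (size_t)1 << (VTD_PAGE_SHIFT_X + 9 * (level - 1));
 * }
 *
 * int main(void)
 * {
 *	size_t size = 4096;
 *	int level = 2;                     /\* hypothetical 2MiB mapping *\/
 *
 *	if (size < level_span(level))
 *		size = level_span(level);
 *	printf("unmapping %zu bytes\n", size);   /\* 2097152 *\/
 *	return 0;
 * }
 */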
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
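
/*
 * Editor's sketch (illustrative only): decoding the GGC word tested above.
 * Bits 8-11 select the graphics stolen-memory size; the VT flavours (0x9
 * and up here) are the ones that also reserve space for the shadow GTT.
 *
 * #include <stdbool.h>
 *
 * #define GGC_MEMORY_SIZE_MASK_X  (0xf << 8)
 * #define GGC_MEMORY_VT_ENABLED_X (0x8 << 8)
 *
 * static bool has_shadow_gtt(unsigned short ggc)
 * {
 *	return ggc & GGC_MEMORY_VT_ENABLED_X;
 * }
 */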
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}