/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
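/*
 * Illustrative note (editor addition, not part of the original source):
 * ~0xFFFUL has every bit from 12 upward set, so every power-of-two size
 * from 4KiB on is advertised.  A rough sketch of the check the IOMMU core
 * effectively performs against this bitmap (hypothetical helper name):
 *
 *	static bool size_advertised(unsigned long size)
 *	{
 *		return is_power_of_2(size) && (size & INTEL_IOMMU_PGSIZES);
 *	}
 *
 * e.g. 4KiB, 2MiB and 1GiB all pass; 6KiB does not.
 */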
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
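/*
 * Worked example (editor addition, illustrative only): with LEVEL_STRIDE == 9
 * and VTD_PAGE_SHIFT == 12, agaw 2 gives agaw_to_width(2) == 30 + 2*9 == 48
 * bits and agaw_to_level(2) == 4 page-table levels.  Level 2 covers
 * level_to_offset_bits(2) == 9 bits, so level_size(2) == 512 VT-d pages,
 * i.e. one level-2 entry spans 2MiB.
 */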
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 12-63: Context Ptr (12 - (haw-1))
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
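/*
 * Illustrative usage sketch (editor addition; the concrete values are
 * hypothetical): programming an entry for domain id 5 with a 4-level table
 * (agaw 2) rooted at physical address 0x12345000 would amount to
 *
 *	context_set_domain_id(ctx, 5);		// hi |= 5 << 8
 *	context_set_address_width(ctx, 2);	// hi |= 2
 *	context_set_address_root(ctx, 0x12345000);
 *	context_set_translation_type(ctx, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(ctx);
 *	context_set_present(ctx);
 */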
/*
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
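/*
 * Descriptive note (editor addition): bits 0 and 1 of a PTE are the read and
 * write permission bits, so dma_pte_present() treats "either permission set"
 * as present; bit 7 marks a superpage leaf.  first_pte_in_page() relies on
 * page-table pages being VTD_PAGE_SIZE aligned: a PTE whose offset within
 * its page is zero is the first entry of that table.
 */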
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif
struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct pci_dev __rcu **devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct pci_dev __rcu **devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	u8	include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: use a default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
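/*
 * Worked example (editor addition, illustrative): DEFAULT_DOMAIN_ADDRESS_WIDTH
 * is 48, and width_to_agaw(48) == DIV_ROUND_UP(48 - 30, 9) == 2, i.e. a
 * 4-level table.  If the SAGAW capability field does not have bit 2 set, the
 * loop above walks downward until it finds an agaw the hardware does support.
 */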
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}

	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}
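/*
 * Descriptive note (editor addition): cap_super_page_val() returns a small
 * bitmask of supported superpage sizes (bit 0 = 2MiB, bit 1 = 1GiB, ...), so
 * ANDing it across all active iommus keeps only the sizes every unit can do,
 * and fls() converts the result into the "level" encoding used by the
 * iommu_superpage field above (e.g. mask 0x1 -> 1 -> 2MiB).
 */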
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct pci_dev *dev;
	int i;

	for_each_active_iommu(iommu, drhd) {
		if (segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev) {
			if (dev->bus->number == bus && dev->devfn == devfn)
				goto out;
			if (dev->subordinate &&
			    dev->subordinate->number <= bus &&
			    dev->subordinate->busn_res.end >= bus)
				goto out;
		}

		if (drhd->include_all)
			goto out;
	}
	iommu = NULL;
out:
	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);
			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	for (pte = page_address(pg); !first_pte_in_page(pte); pte++) {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
	}

	return freelist;
}
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}
/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct page *freelist = NULL;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}
void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;

	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
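/*
 * Descriptive note (editor addition): for a page-selective (PSI) flush the
 * low bits of the IVA value carry the address-mask order, so val_iva =
 * size_order | addr asks the hardware to invalidate a naturally aligned
 * 2^size_order-page region around addr; e.g. size_order 4 covers 16 pages.
 */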
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
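/*
 * Worked example (editor addition, illustrative): for pages == 9,
 * __roundup_pow_of_two(9) == 16 and mask == ilog2(16) == 4, so a naturally
 * aligned 16-page region is invalidated.  If that mask exceeds
 * cap_max_amask_val(), or page-selective invalidation isn't supported, the
 * code above falls back to a domain-selective (DSI) flush instead.
 */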
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}
static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i, count;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			count = --domain->iommu_count;
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
			if (count == 0)
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(bool vm)
{
	/* domain id for virtual machine, it won't be set in context */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->iommu_count = 0;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (vm) {
		domain->id = atomic_inc_return(&vm_domid);
		domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
	}

	return domain;
}
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	domain->iommu_count++;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			clear_bit(num, iommu->domain_ids);
			iommu->domains[num] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
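/*
 * Worked example (editor addition, illustrative): the adjusted width is the
 * guest width rounded up so that (width - 12) is a multiple of the 9-bit
 * level stride.  guestwidth_to_adjustwidth(48) gives r == 0 and returns 48
 * unchanged, while a 36-bit guest width gives r == 6 and is rounded up to
 * 39 bits (a 3-level table).
 */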
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	for_each_active_iommu(iommu, drhd)
		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
		    test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
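/*
 * Worked example (editor addition, illustrative): with 4KiB MM pages, a
 * buffer starting 0x200 bytes into a page with size 0x1000 gives
 * PAGE_ALIGN(0x200 + 0x1000) == 0x2000, i.e. 2 VT-d pages, even though the
 * raw length is only one page.
 */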
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
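/*
 * Worked example (editor addition, illustrative): mapping 512 VT-d pages
 * (2MiB) where both iov_pfn and phy_pfn are multiples of 512, on hardware
 * with iommu_superpage >= 1, returns level 2, i.e. a single 2MiB PTE.  If
 * either pfn has any of its low 9 bits set, or fewer than 512 pages remain,
 * the function stays at level 1 (4KiB PTEs).
 */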
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed to make room
				   for superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
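/*
 * Descriptive note (editor addition): sg_res counts how many VT-d pages
 * remain in the current scatterlist element; when it reaches zero while
 * nr_pages is still nonzero, the loop above advances to the next element and
 * recomputes pteval from its page.  first_pte/pte track the run of PTEs
 * written since the last flush, so domain_flush_cache() covers exactly that
 * span.
 */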
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->dev.archdata.iommu = NULL;
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags, flags2;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);

		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
			iommu_detach_dependent_devices(iommu, info->dev);
			/* clear this iommu in iommu_bmp, update iommu count
			 * and capabilities
			 */
			spin_lock_irqsave(&domain->iommu_lock, flags2);
			if (test_and_clear_bit(iommu->seq_id,
					       domain->iommu_bmp)) {
				domain->iommu_count--;
				domain_update_iommu_cap(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags2);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * Note: we use struct pci_dev->dev.archdata.iommu to store the per-device
 * domain info.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct dmar_domain *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info->domain;

	return NULL;
}
static int dmar_insert_dev_info(int segment, int bus, int devfn,
				struct pci_dev *dev, struct dmar_domain **domp)
{
	struct dmar_domain *found, *domain = *domp;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = segment;
	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	if (!dev)
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else
		found = dmar_search_domain_by_dev_info(segment, bus, devfn);
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		if (found != domain) {
			domain_exit(domain);
			*domp = found;
		}
	} else {
		list_add(&info->link, &domain->devices);
		list_add(&info->global, &device_domain_list);
		if (dev)
			dev->dev.archdata.iommu = info;
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

	return 0;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *free = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		domain = dmar_search_domain_by_dev_info(segment, bus, devfn);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (domain)
			goto found_domain;
	}

	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	/* Allocate and initialize new domain for the device */
	domain = alloc_domain(false);
	if (!domain)
		goto error;
	if (iommu_attach_domain(domain, iommu)) {
		free_domain_mem(domain);
		goto error;
	}
	free = domain;
	if (domain_init(domain, gaw))
		goto error;

	/* register pcie-to-pci device */
	if (dev_tmp) {
		if (dmar_insert_dev_info(segment, bus, devfn, NULL, &domain))
			goto error;
		else
			free = NULL;
	}

found_domain:
	if (dmar_insert_dev_info(segment, pdev->bus->number, pdev->devfn,
				 pdev, &domain) == 0)
		return domain;
error:
	if (free)
		domain_exit(free);
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

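/*
 * Worked example (hypothetical range): an RMRR covering
 * [0x7f800000, 0x7f8fffff] gives first_vpfn = 0x7f800 and
 * last_vpfn = 0x7f8ff, i.e. 0x100 (256) 4KiB pages mapped 1:1
 * with DMA_PTE_READ|DMA_PTE_WRITE.
 */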
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;

	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain(false);
	if (!si_domain)
		return -EFAULT;

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

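/*
 * si_domain is the single static identity ("SI") domain shared by all
 * devices that are allowed 1:1 mappings.  With hardware pass-through its
 * page tables are never consulted; otherwise the loop above maps every
 * usable RAM range of every online node 1:1.
 */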
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		spin_lock_irqsave(&device_domain_lock, flags);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct pci_dev *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *tmp;
	int i;

	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * we are looking for.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev)
				return true;
	}
	return false;
}

static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	/*
	 * We want to prevent any device associated with an RMRR from
	 * getting placed into the SI Domain. This is done because
	 * problems exist when devices are moved in and out of domains
	 * and their respective RMRR info is lost. We exempt USB devices
	 * from this process due to their usage of RMRRs that are known
	 * to not be needed after BIOS hand-off to OS.
	 */
	if (device_has_rmrr(pdev) &&
	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
		return 0;

	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}

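/*
 * Note that iommu_should_identity_map() is consulted again at run time
 * (startup == 0, see iommu_no_mapping()), where the decision additionally
 * depends on the device's DMA mask covering all of memory.
 */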
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
						  hw ? CONTEXT_TT_PASS_THROUGH :
						       CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}

static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto free_iommu;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_active_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_active_iommu(iommu, drhd) {
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}

	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *   endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		/* some BIOS lists non-exist devices in DMAR table. */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, pdev) {
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto free_iommu;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd)
		free_dmar_iommu(iommu);
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}

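/*
 * Summary of init_dmars(): allocate per-IOMMU domain bookkeeping and root
 * entries, pick queued vs. register-based invalidation, set up the static
 * identity map and the RMRR/ISA unity maps, then enable DMA translation on
 * every unit that is not ignored.
 */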
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(!dev_is_pci(dev)))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		/*
		 * 32 bit DMA is removed from si_domain and fall back
		 * to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, pdev);
		printk(KERN_INFO "32bit %s uses non-identity mapping\n",
		       pci_name(pdev));
		return 0;
	}

	/*
	 * In case of a detached 64 bit DMA device from vm, the device
	 * is put into si_domain for identity mapping.
	 */
	if (iommu_should_identity_map(pdev, 0)) {
		int ret;

		ret = domain_add_dev_info(si_domain, pdev,
					  hw_pass_through ?
					  CONTEXT_TT_PASS_THROUGH :
					  CONTEXT_TT_MULTI_LEVEL);
		if (!ret) {
			printk(KERN_INFO "64bit %s uses identity mapping\n",
			       pci_name(pdev));
			return 1;
		}
	}

	return 0;
}

static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two part of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
					!deferred_flush[i].freelist[j], 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

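/*
 * Deferred-flush design: in non-strict mode, unmapped IOVAs are queued per
 * IOMMU and released in batches, either when HIGH_WATER_MARK entries pile
 * up or when the 10ms unmap_timer fires, trading unmap latency for far
 * fewer IOTLB invalidations.
 */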
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

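/*
 * intel_dma_ops is installed as the global dma_ops in intel_iommu_init(),
 * so ordinary drivers reach this code through the generic DMA API rather
 * than calling it directly.  A minimal sketch of a consumer (hypothetical
 * device and buffer, shown for illustration only):
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	... hardware DMAs to/from "handle" ...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */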
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	int rc;
	u32 vtbar;
	struct dmar_drhd_unit *drhd;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct pci_dev *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!IS_GFX_DEVICE(dev))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
						 GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

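/*
 * iommu_resume() is the mirror image of iommu_suspend(): after re-enabling
 * the hardware via init_iommu_hw() it writes the saved fault-event control,
 * data and address registers back before freeing the saved state.
 */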
static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL, *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	return ret;
}

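/*
 * dmar_find_matched_atsr_unit() walks from the device up to its PCIe root
 * port and reports whether any ATSR (ATS capability reporting) structure
 * covers that root port, i.e. whether Address Translation Services may be
 * used through it.
 */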
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt))
				return 0;
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * Here we only respond to action of unbound device from driver.
 *
 * Added device is not attached to its DMAR domain here yet. That will happen
 * when mapping the device to iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_dummy(pdev))
		return 0;

	if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
	    action != BUS_NOTIFY_DEL_DEVICE)
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, pdev);
	if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
	    list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("dmar: failed get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
					       iova->pfn_hi);

			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo,
					iova->pfn_hi - iova->pfn_lo + 1,
					!freelist, 0);

			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = alloc_domain(true);
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	domain_exit(dmar_domain);
}

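/*
 * When the IOMMU behind a device supports a narrower address width than
 * the domain was created with, intel_iommu_attach_device() shrinks the
 * domain: it clamps gaw to the hardware's mgaw and strips the now-unused
 * top level(s) of the page table before wiring up the context entry.
 */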
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}

#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as lowest numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}

			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

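/*
 * These ops back the generic IOMMU API (drivers/iommu/iommu.c).  A rough
 * sketch of how a caller such as VFIO or KVM device assignment uses them
 * (hypothetical values, error handling omitted):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, SZ_4K);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */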
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}