/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
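
/*
 * Worked example (illustrative, assuming VTD_PAGE_SHIFT == 12): for the
 * default 48-bit guest address width, __DOMAIN_MAX_PFN(48) is
 * (1 << (48 - 12)) - 1 = 2^36 - 1, which fits in an unsigned long on
 * 64-bit builds, so DOMAIN_MAX_PFN(48) == 0xFFFFFFFFF and
 * DOMAIN_MAX_ADDR(48) == 0xFFFFFFFFF000. On 32-bit builds the min_t()
 * clamps the PFN to (unsigned long)-1 instead.
 */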
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
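
/*
 * Example of what the bitmap means: bit N set advertises support for a
 * page size of 2^N bytes, so ~0xFFFUL sets every bit from 12 upwards,
 * i.e. 4KiB, 8KiB, 16KiB, ... are all claimed, matching the "any order
 * of 4KiB" behaviour described above.
 */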
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
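
/*
 * Worked example of the helpers above (illustrative): with the default
 * 48-bit width, width_to_agaw(48) = (48 - 30) / 9 = 2 and
 * agaw_to_level(2) = 4, i.e. a four-level page table. For level 4,
 * level_to_offset_bits(4) = 27, so pfn_level_offset() takes bits 27-35
 * of the VT-d PFN (address bits 39-47) as the index into the top table;
 * each lower level consumes the next 9 bits.
 */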
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;
/*
 * Root entry layout (low 64 bits):
 * 12-63: Context Ptr (12 - (haw-1))
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * Context entry layout (low 64 bits):
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
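
/*
 * Putting the setters above together (illustrative values): programming a
 * context entry for domain id 5 with a 4-level table (address width
 * value 2) and multi-level translation (type 0) yields
 * hi = (5 << 8) | 2 = 0x502 and
 * lo = (pgd physical address & VTD_PAGE_MASK) | 1 (present),
 * with bits 2-3 left at 0 for the translation type.
 */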
/*
 * DMA page table entry:
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
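
/*
 * Illustrative PTE value: mapping host PFN 0x12345 read/write (no snoop)
 * gives val = (0x12345 << 12) | DMA_PTE_READ | DMA_PTE_WRITE = 0x12345003,
 * so dma_pte_present() sees the low two bits set and dma_pte_addr()
 * recovers 0x12345000.
 */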
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif
struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;
static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
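
/*
 * Example kernel command lines accepted by the parser above (options are
 * comma separated):
 *	intel_iommu=on
 *	intel_iommu=on,strict
 *	intel_iommu=on,igfx_off,sp_off
 */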
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->busn_res.end >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
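
/*
 * Illustrative lookup path: for PCI device 00:1f.3 (bus 0x00, devfn 0xfb),
 * the root table entry iommu->root_entry[0x00] points (physically) at a
 * page of 256 context entries, and the helper above returns
 * &context[0xfb], allocating and installing that context-table page on
 * first use.
 */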
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);
			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
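
/*
 * Example walk (illustrative): with a 4-level table (agaw 2), looking up
 * VT-d PFN 0x12345 visits index (0x12345 >> 27) & 0x1ff = 0 at level 4,
 * (0x12345 >> 18) & 0x1ff = 0 at level 3, (0x12345 >> 9) & 0x1ff = 0x91
 * at level 2 and 0x12345 & 0x1ff = 0x145 at level 1, allocating any
 * missing intermediate tables along the way.
 */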
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear last level pte; a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level))) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
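
/*
 * Example of the mask computation above (illustrative): flushing 9 pages
 * rounds up to 16, so mask = ilog2(16) = 4 and the hardware invalidates a
 * 16-page (64KiB) naturally aligned region via PSI; if 4 exceeded
 * cap_max_amask_val(), the code would fall back to the domain-selective
 * flush instead.
 */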
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}
static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}
	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
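
/*
 * Example of the width adjustment above (illustrative): a guest width of
 * 39 bits gives (39 - 12) % 9 == 0, so adjust_width stays 39 and
 * width_to_agaw(39) = 1 (a 3-level table); a guest width of 44 would be
 * rounded up to 48 and use agaw 2. If the hardware SAGAW field lacks the
 * chosen agaw bit, find_next_bit() above picks the next larger supported
 * one.
 */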
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
			int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}
*pdev
)
1728 struct pci_dev
*tmp
, *parent
;
1729 struct intel_iommu
*iommu
;
1731 iommu
= device_to_iommu(pci_domain_nr(pdev
->bus
), pdev
->bus
->number
,
1736 ret
= device_context_mapped(iommu
, pdev
->bus
->number
, pdev
->devfn
);
1739 /* dependent device mapping */
1740 tmp
= pci_find_upstream_pcie_bridge(pdev
);
1743 /* Secondary interface's bus number and devfn 0 */
1744 parent
= pdev
->bus
->self
;
1745 while (parent
!= tmp
) {
1746 ret
= device_context_mapped(iommu
, parent
->bus
->number
,
1750 parent
= parent
->bus
->self
;
1752 if (pci_is_pcie(tmp
))
1753 return device_context_mapped(iommu
, tmp
->subordinate
->number
,
1756 return device_context_mapped(iommu
, tmp
->bus
->number
,
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
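
/*
 * Example (illustrative, 4KiB MM pages): host_addr offset 0x400 into a
 * page and size 0x1800 give PAGE_ALIGN(0x1C00) = 0x2000, i.e. 2 VT-d
 * pages, even though the raw length is only 6KiB.
 */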
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
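
/*
 * Example (illustrative): with iommu_superpage >= 1, an IOVA PFN and a
 * physical PFN that are both 512-page (2MiB) aligned, and at least 512
 * pages left to map, the loop above returns level 2, so the caller can
 * install a single 2MiB superpage PTE instead of 512 4KiB entries.
 */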
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is large page*/
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed to make room
				   for superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->dev.archdata.iommu = NULL;
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * Note: struct pci_dev->dev.archdata.iommu stores the per-device
 * domain info.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
					struct pci_dev *pdev);

static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		spin_lock_irqsave(&device_domain_lock, flags);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return ret;
	}

	return 0;
}
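/* Is this device listed by any RMRR in the DMAR table? */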
static bool device_has_rmrr(struct pci_dev *dev)
{
	struct dmar_rmrr_unit *rmrr;
	int i;

	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			/*
			 * Return TRUE if this RMRR contains the device that
			 * is passed in.
			 */
			if (rmrr->devices[i] == dev)
				return true;
		}
	}
	return false;
}
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	/*
	 * We want to prevent any device associated with an RMRR from
	 * getting placed into the SI Domain. This is done because
	 * problems exist when devices are moved in and out of domains
	 * and their respective RMRR info is lost. We exempt USB devices
	 * from this process due to their usage of RMRRs that are known
	 * to not be needed after BIOS hand-off to OS.
	 */
	if (device_has_rmrr(pdev) &&
	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
		return 0;

	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}
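/*
 * Walk all PCI devices at boot and place the eligible ones into the static
 * identity domain, using pass-through context entries when the hardware
 * supports them.
 */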
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
					     hw ? CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}

	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOS lists non-exist devices in DMAR
			 * table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(drhd->iommu);
			continue;
		}
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(!dev_is_pci(dev)))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		/*
		 * 32 bit DMA is removed from si_domain and fall back
		 * to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, pdev);
		printk(KERN_INFO "32bit %s uses non-identity mapping\n",
		       pci_name(pdev));
		return 0;
	}

	/*
	 * In case of a detached 64 bit DMA device from vm, the device
	 * is put into si_domain for identity mapping.
	 */
	if (iommu_should_identity_map(pdev, 0)) {
		int ret;
		ret = domain_add_dev_info(si_domain, pdev,
					  hw_pass_through ?
					  CONTEXT_TT_PASS_THROUGH :
					  CONTEXT_TT_MULTI_LEVEL);
		if (!ret) {
			printk(KERN_INFO "64bit %s uses identity mapping\n",
			       pci_name(pdev));
			return 1;
		}
	}

	return 0;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two part of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
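/*
 * Drain the per-IOMMU deferred-unmap tables: invalidate the IOTLB for every
 * queued range and only then release the corresponding IOVAs.  Called with
 * async_umap_flush_lock held.
 */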
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
				iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
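/*
 * Queue an IOVA range for deferred release and arm the unmap timer; the
 * pending ranges are flushed immediately once the high-water mark is hit.
 */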
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
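/*
 * dma_ops->unmap_page: tear down the IOVA range behind a streaming mapping;
 * flush the IOTLB immediately in strict mode, otherwise defer the flush and
 * the IOVA release via add_unmap().
 */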
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/*  clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
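/*
 * dma_ops->alloc: allocate zeroed, physically contiguous memory and map it
 * bidirectionally through the IOMMU; identity-mapped devices simply get the
 * physical address back from __intel_map_single().
 */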
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/*  clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
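/*
 * dma_ops->map_sg: allocate a single IOVA range large enough for the whole
 * scatterlist and map every segment into it in one pass.
 */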
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/*  clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that the this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
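/*
 * Mark DMAR units that cover no PCI devices (or, when graphics mapping is
 * disabled, only graphics devices) as ignored so no translation is set up
 * for them.
 */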
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for (i = 0; i < drhd->devices_cnt; i++) {
				if (!drhd->devices[i])
					continue;
				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
			}
		}
	}
}
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
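/*
 * Save the fault-event registers and disable translation on every active
 * IOMMU before entering a system sleep state; iommu_resume() restores them.
 */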
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
						 GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}
static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static LIST_HEAD(dmar_atsr_units);

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

int __init dmar_parse_rmrr_atsr_dev(void)
{
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = 0;

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * Here we only respond to action of unbound device from driver.
 *
 * Added device is not attached to its DMAR domain here yet. That will happen
 * when mapping the device to iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_no_mapping(dev))
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
		domain_remove_one_dev_info(domain, pdev);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
		    list_empty(&domain->devices))
			domain_exit(domain);
	}

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};
int __init intel_iommu_init(void)
{
	int ret = 0;
	struct dmar_drhd_unit *drhd;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu;

		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	if (no_iommu || dmar_disabled)
		return -ENODEV;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		return -ENODEV;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;
}
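/*
 * Tear down the context entries of the PCI(e)-to-PCI bridges between a
 * device and its IOMMU; requests from devices behind such bridges are
 * tagged with the bridge's source-id, so the bridges must be detached too.
 */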
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static atomic_t vm_domid = ATOMIC_INIT(0);

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = atomic_inc_return(&vm_domid);
	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(i, iommu->domain_ids, ndomains) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
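/*
 * IOMMU-API attach: move the device out of any previous domain, check that
 * the IOMMU's address width can hold the domain's highest mapped address,
 * then install a context entry pointing at the domain's page tables.
 */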
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as lowest numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)
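/*
 * Ironlake/Calpella: if the BIOS allocated no shadow GTT space, graphics
 * translation cannot work, so disable it; otherwise disable batched IOTLB
 * flushing, because the gfx device has to be idle before its mappings are
 * flushed.
 */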
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}