/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
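
/*
 * Worked example of the macros above (illustrative values): with 4KiB
 * pages, DOMAIN_MAX_ADDR(48) = (1ULL << 48) - 1 = 0x0000ffffffffffff, the
 * highest address a 48-bit guest address width can express, and
 * IOVA_PFN(0x12345678) = 0x12345 -- the macros simply convert between
 * byte addresses and 4KiB page frame numbers.
 */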
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
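
/*
 * Example (illustrative values only): programming a context entry for
 * domain 5 with a 3-level (agaw 1, 39-bit) page table rooted at physical
 * address 0x1234000 would be:
 *
 *	context_set_domain_id(context, 5);		   hi |= 5 << 8
 *	context_set_address_root(context, 0x1234000);	   lo |= 0x1234000
 *	context_set_address_width(context, 1);		   hi |= 1
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 *
 * The setters only OR bits in, so they assume the entry starts out zeroed
 * (freshly allocated, or cleared via context_clear_entry()).
 */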
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
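
/*
 * Worked example: mapping host page 0x5000 read/write with the helpers
 * above,
 *
 *	dma_set_pte_addr(pte, 0x5000);
 *	dma_set_pte_prot(pte, DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * leaves pte->val == 0x5003: the address in bits 12-63, R and W in bits
 * 0-1.  That is also why dma_pte_present() tests (val & 3) rather than a
 * dedicated present bit.
 */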
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);
#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
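
/*
 * Options can be combined on the kernel command line, separated by
 * commas, e.g.
 *
 *	intel_iommu=on,strict,igfx_off
 *
 * enables the IOMMU, disables batched (deferred) IOTLB flushing and
 * leaves the graphics device unmapped.
 */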
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}
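
/*
 * Worked example of the level arithmetic: a domain with a 48-bit adjusted
 * guest address width has agaw = (48 - 30) / 9 = 2 and therefore
 * agaw_to_level(2) = 4 page-table levels.  The 9-bit table index for
 * level N starts at bit level_to_offset_bits(N), i.e. 12/21/30/39 for
 * levels 1-4, and level_size(2) = 1 << 21 = 2MiB is the span covered by
 * a single level-2 entry.
 */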
static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}
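
/*
 * Note the walk above allocates missing intermediate tables on demand, so
 * a non-NULL return means every level down to the leaf is present; only
 * the leaf's address and permission bits are left for the caller to set.
 */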
/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	int npages;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's partial page */
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	npages = (end - start) / VTD_PAGE_SIZE;

	/* we don't need lock here, nobody else touches the iova range */
	while (npages--) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
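
/*
 * For a page-selective (PSI) flush, val_iva packs the page-aligned
 * address together with the size order in its low bits: e.g. flushing 16
 * pages (size_order = 4) at 0x100000 writes 0x100004 to the IVA register,
 * meaning "invalidate 2^4 pages naturally aligned at this address".
 */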
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  u64 addr, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
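
/*
 * Worked example: a request to flush 5 pages yields mask =
 * ilog2(__roundup_pow_of_two(5)) = 3, i.e. an aligned 8-page
 * invalidation; PSI can only express naturally aligned power-of-two
 * ranges, so the range is rounded up rather than split.
 */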
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
{
	unsigned long num;
	unsigned long ndomains;
	struct dmar_domain *domain;
	unsigned long flags;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_domain_mem(domain);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return NULL;
	}

	set_bit(num, iommu->domain_ids);
	domain->id = num;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	domain->flags = 0;
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return domain;
}

static void iommu_free_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct intel_iommu *iommu;

	iommu = domain_get_iommu(domain);

	spin_lock_irqsave(&iommu->lock, flags);
	clear_bit(domain->id, iommu->domain_ids);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start & PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
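
/*
 * e.g. guestwidth_to_adjustwidth(40) = 40 + 9 - ((40 - 12) % 9) = 48:
 * guest widths are rounded up to the next width w with (w - 12) % 9 == 0
 * (30, 39, 48, 57, ...), since every page-table level resolves exactly
 * LEVEL_STRIDE = 9 bits.
 */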
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
static int
domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
		    u64 hpa, size_t size, int prot)
{
	u64 start_pfn, end_pfn;
	struct dma_pte *pte;
	int index;
	int addr_width = agaw_to_width(domain->agaw);

	hpa &= (((u64)1) << addr_width) - 1;

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;
	iova &= PAGE_MASK;
	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
	index = 0;
	while (start_pfn < end_pfn) {
		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
		dma_set_pte_prot(pte, prot);
		if (prot & DMA_PTE_SNP)
			dma_set_pte_snp(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
		start_pfn++;
		index++;
	}
	return 0;
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	domain = iommu_alloc_domain(iommu);
	if (!domain)
		goto error;

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	unsigned long size;
	unsigned long long base;
	int ret;

	printk(KERN_INFO
		"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		pci_name(pdev), start, end);
	/* page table init */
	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		ret = -ENOMEM;
		goto error;
	}

	pr_debug("Mapping reserved region %lx@%llx for %s\n",
		size, base, pci_name(pdev));
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, base, base + size);

	ret = domain_page_mapping(domain, base, base, size,
				  DMA_PTE_READ|DMA_PTE_WRITE);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (!ret)
		return 0;
error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_GFX_WA
struct iommu_prepare_data {
	struct pci_dev *pdev;
	int ret;
};

static int __init iommu_prepare_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct iommu_prepare_data *data;

	data = (struct iommu_prepare_data *)datax;

	data->ret = iommu_prepare_identity_map(data->pdev,
				start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	return data->ret;
}

static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
{
	int nid;
	struct iommu_prepare_data data;

	data.pdev = pdev;
	data.ret = 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, iommu_prepare_work_fn, &data);
		if (data.ret)
			return data.ret;
	}
	return data.ret;
}

static void __init iommu_prepare_gfx_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	for_each_pci_dev(pdev) {
		if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
				!IS_GFX_DEVICE(pdev))
			continue;
		printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
			pci_name(pdev));
		ret = iommu_prepare_with_active_regions(pdev);
		if (ret)
			printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
	}
}
#else /* !CONFIG_DMAR_GFX_WA */
static inline void iommu_prepare_gfx_mapping(void)
{
	return;
}
#endif

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
			"floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass through.*/
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through)
		if (!pass_through) {
			printk(KERN_INFO
			       "Pass Through is not supported by hardware.\n");
			iommu_pass_through = 0;
		}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled) {
		ret = enable_intr_remapping(0);
		if (ret)
			printk(KERN_ERR
			       "IOMMU: enable interrupt remapping failed\n");
	}
#endif
	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized by pass through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa.
	 */
	if (!iommu_pass_through) {
		/*
		 * For each rmrr
		 *   for each dev attached to rmrr
		 *   do
		 *     locate drhd for dev, alloc domain for dev
		 *     allocate free domain
		 *     allocate page table entries for rmrr
		 *     if context not allocated for bus
		 *           allocate and init context
		 *           set present in root table for this bus
		 *     init context with domain, translation etc
		 *    endfor
		 * endfor
		 */
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOSes list nonexistent devices in the
				 * DMAR table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_gfx_mapping();

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
static inline u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}
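
/*
 * e.g. aligned_size(0x1ffc, 8) = PAGE_ALIGN(0xffc + 8) = 0x2000: an
 * 8-byte buffer that straddles a page boundary still costs two full
 * pages of IOVA space, since the IOMMU maps whole pages only.
 */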
static struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}
static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_size((u64)paddr, size);

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two parts of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_page_mapping(domain, start_paddr,
				  ((u64)paddr) & PAGE_MASK, size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_paddr,
				      size >> VTD_PAGE_SHIFT);
	else
		iommu_flush_write_buffer(iommu);

	return start_paddr + ((u64)paddr & (~PAGE_MASK));

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;
	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	size = aligned_size((u64)dev_addr, size);

	pr_debug("Device %s unmapping: %zx@%llx\n",
		pci_name(pdev), size, (unsigned long long)start_addr);

	/*  clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
				      size >> VTD_PAGE_SHIFT);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
2448 static void *intel_alloc_coherent(struct device
*hwdev
, size_t size
,
2449 dma_addr_t
*dma_handle
, gfp_t flags
)
2454 size
= PAGE_ALIGN(size
);
2455 order
= get_order(size
);
2456 flags
&= ~(GFP_DMA
| GFP_DMA32
);
2458 vaddr
= (void *)__get_free_pages(flags
, order
);
2461 memset(vaddr
, 0, size
);
2463 *dma_handle
= __intel_map_single(hwdev
, virt_to_bus(vaddr
), size
,
2465 hwdev
->coherent_dma_mask
);
2468 free_pages((unsigned long)vaddr
, order
);
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
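/*
 * dma_map_ops unmap_sg hook: recomputes the aggregate aligned size of
 * the list, clears the PTEs and page tables for the whole range, issues
 * a page-selective IOTLB flush and frees the IOVA.
 */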
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	size_t size = 0;
	phys_addr_t addr;
	struct scatterlist *sg;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;

	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	start_addr = iova->pfn_lo << PAGE_SHIFT;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);

	iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
			      size >> VTD_PAGE_SHIFT);

	/* free iova */
	__free_iova(&domain->iovad, iova);
}
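/*
 * Identity path for devices that bypass translation
 * (DUMMY_DEVICE_DOMAIN_INFO): the DMA address of each segment is simply
 * the physical address of its page.
 */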
static int intel_nontranslate_map_sg(struct device *hwdev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
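/*
 * dma_map_ops map_sg hook: the aligned sizes of all segments are summed
 * first so one contiguous IOVA range can be allocated, then each
 * segment is mapped at its running offset within that range. Hardware
 * only needs a flush here in caching mode, since the mapping is a
 * non-present to present transition.
 */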
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	phys_addr_t addr;
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_addr;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	offset = 0;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size = aligned_size((u64)addr, sg->length);
		ret = domain_page_mapping(domain, start_addr + offset,
					  ((u64)addr) & PAGE_MASK,
					  size, prot);
		if (ret) {
			/* clear the page */
			dma_pte_clear_range(domain, start_addr,
					    start_addr + offset);
			/* free page tables */
			dma_pte_free_pagetable(domain, start_addr,
					       start_addr + offset);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			return 0;
		}
		sg->dma_address = start_addr + offset +
				  ((u64)addr & (~PAGE_MASK));
		sg->dma_length = sg->length;
		offset += size;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_addr,
				      offset >> VTD_PAGE_SHIFT);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
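/*
 * Slab caches for the three allocation-heavy structures (dmar_domain,
 * device_domain_info and iova). Created once at init and torn down
 * again in iommu_exit_mempool() or on partial-init failure.
 */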
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}
static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
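/*
 * Suspend/resume support: remapping state that the hardware loses
 * across a suspend (root entry, fault-event registers, translation
 * enable) must be saved here and re-established on resume.
 */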
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
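/*
 * Suspend: flush everything, disable translation, and save the
 * fault-event registers (FECTL/FEDATA/FEADDR/FEUADDR) of every active
 * IOMMU so they can be rewritten on resume.
 */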
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
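/*
 * Resume: re-enable queued invalidation and translation through
 * init_iommu_hw(), then restore the fault-event registers saved at
 * suspend time.
 */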
static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {
		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}
static struct sysdev_class iommu_sysclass = {
	.name		= "iommu",
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls	= &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif	/* CONFIG_SUSPEND */
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
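/*
 * What follows is the "virtual machine" domain support exposed through
 * the generic IOMMU API (register_iommu() above), used for assigning
 * devices to guests, e.g. by KVM.
 */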
static int vm_domain_add_dev_info(struct dmar_domain *domain,
				  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
					  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int vm_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = vm_domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	vm_domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Force a write-buffer flush on this hardware.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);