/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
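
/*
 * Example: DMA_BIT_MASK(32) is 0xffffffff, so with 4KiB pages
 * DMA_32BIT_PFN is 0xfffff -- the last page frame reachable through a
 * 32-bit DMA address. It is used below to bound IOVA allocation for
 * ranges that must stay addressable by 32-bit devices.
 */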
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
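
/*
 * Example of the arithmetic above: with LEVEL_STRIDE == 9, agaw 2 gives
 * agaw_to_width() == 30 + 2 * 9 == 48 bits (the default domain address
 * width) and agaw_to_level() == 4 page-table levels; each level resolves
 * LEVEL_STRIDE bits of the DMA page frame number.
 */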
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val &= ~VTD_PAGE_MASK;
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
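
/*
 * A page-table page holds VTD_PAGE_SIZE / sizeof(struct dma_pte) == 512
 * entries, so a PTE pointer with no bits set below VTD_PAGE_SHIFT is the
 * first entry of its table page; the mapping and teardown loops below use
 * first_pte_in_page() to notice when they have walked off the end of the
 * current table page.
 */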
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
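
/*
 * Example: for the default 48-bit width, width_to_agaw(48) == 2; if bit 2
 * of the SAGAW capability field is clear, the loop above falls back to
 * the next lower supported agaw, and the caller gets -1 if none of the
 * candidate widths is supported.
 */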
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}
/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}
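
/*
 * The freelist built by domain_unmap() threads page-table pages together
 * through page->freelist; releasing it is therefore a simple walk that
 * hands each page back to the page-table allocator.
 */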
void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("IOMMU: allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;
		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
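
/*
 * Note the source-id computed above: the PCI requester ID is simply
 * bus << 8 | devfn, which together with the ATS invalidation queue depth
 * is what qi_flush_dev_iotlb() needs to target one device.
 */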
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
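
/*
 * Example: flushing 512 pages gives mask == ilog2(512) == 9, i.e. one
 * naturally aligned 2MiB region; page-selective invalidation (PSI) is
 * only usable while mask <= cap_max_amask_val(iommu->cap), otherwise the
 * code above falls back to a domain-selective flush.
 */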
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);
			if (domain_detach_iommu(domain, iommu) == 0 &&
			    !domain_type_is_vm(domain))
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
	/* domain id for virtual machine, it won't be set in context */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
		domain->id = atomic_inc_return(&vm_domid);

	return domain;
}
static int __iommu_attach_domain(struct dmar_domain *domain,
				 struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num < ndomains) {
		set_bit(num, iommu->domain_ids);
		iommu->domains[num] = domain;
	} else {
		num = -ENOSPC;
	}

	return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	num = __iommu_attach_domain(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (num < 0)
		pr_err("IOMMU: no free domain ids\n");

	return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
				  struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains)
		if (iommu->domains[num] == domain)
			return num;

	return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	if (domain_type_is_vm_or_si(domain)) {
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				clear_bit(num, iommu->domain_ids);
				iommu->domains[num] = NULL;
				break;
			}
		}
	} else {
		clear_bit(domain->id, iommu->domain_ids);
		iommu->domains[domain->id] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void domain_attach_iommu(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;
	int count = INT_MAX;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
		count = --domain->iommu_count;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	return count;
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain_type_is_vm_or_si(domain)) {
		if (domain_type_is_vm(domain)) {
			id = iommu_attach_vm_domain(domain, iommu);
			if (id < 0) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				pr_err("IOMMU: no free domain ids\n");
				return -EFAULT;
			}
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries, we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	domain_attach_iommu(domain, iommu);

	return 0;
}
struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff,
					  data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn,
						  translation);

	data.domain = domain;
	data.iommu = iommu;
	data.translation = translation;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
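
/*
 * Example: host_addr == 0x1001 and size == 0x1000 touch two 4KiB MM
 * pages (offset 1 plus 4096 bytes), so with 4KiB pages this returns
 * PAGE_ALIGN(0x1 + 0x1000) >> VTD_PAGE_SHIFT == 2.
 */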
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
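
/*
 * Example: if iov_pfn and phy_pfn are both 2MiB aligned (low nine bits
 * clear) and at least 512 pages remain, the loop above returns level 2
 * and __domain_mapping() below can use a single 2MiB superpage, provided
 * domain->iommu_superpage permits it.
 */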
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is large page*/
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain_type_is_vm(domain)) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			domain_detach_iommu(domain, info->iommu);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: we use struct device->archdata.iommu to store the iommu info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
	struct device_domain_info *info;
	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, use it */
		if (info)
			goto found_domain;
	}

	/* Allocate and initialize new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	domain->id = iommu_attach_domain(domain, iommu);
	if (domain->id < 0) {
		free_domain_mem(domain);
		return NULL;
	}
	domain_attach_iommu(domain, iommu);
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/* register PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

		if (!domain)
			return NULL;
	}

found_domain:
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}

	return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
2309 static int iommu_domain_identity_map(struct dmar_domain
*domain
,
2310 unsigned long long start
,
2311 unsigned long long end
)
2313 unsigned long first_vpfn
= start
>> VTD_PAGE_SHIFT
;
2314 unsigned long last_vpfn
= end
>> VTD_PAGE_SHIFT
;
2316 if (!reserve_iova(&domain
->iovad
, dma_to_mm_pfn(first_vpfn
),
2317 dma_to_mm_pfn(last_vpfn
))) {
2318 printk(KERN_ERR
"IOMMU: reserve iova failed\n");
2322 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2323 start
, end
, domain
->id
);
2325 * RMRR range might have overlap with physical memory range,
2328 dma_pte_clear_range(domain
, first_vpfn
, last_vpfn
);
2330 return domain_pfn_mapping(domain
, first_vpfn
, first_vpfn
,
2331 last_vpfn
- first_vpfn
+ 1,
2332 DMA_PTE_READ
|DMA_PTE_WRITE
);
static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       dev_name(dev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;

	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
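/*
 * si_domain is the single shared "static identity" domain.  si_domain_init()
 * attaches it to every active IOMMU and, unless hardware passthrough is in
 * use, pre-populates it with a 1:1 mapping of every usable physical memory
 * range, so devices placed in it can DMA anywhere without per-buffer
 * mappings.
 */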
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}
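/*
 * Bind @dev to @domain: insert the device_domain_info and program the
 * context entry with the requested translation type.  If the device (or its
 * DMA alias) already belongs to a different domain the binding is refused,
 * and a failed context mapping is unwound before returning the error.
 */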
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}
static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}
/*
 * There are a couple cases where we need to restrict the functionality of
 * devices associated with RMRRs.  The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost.  This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API.  This interface
 * expects to have full control of the IOVA space for the device.  We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space.  We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
			return false;
	}

	return true;
}
static int iommu_should_identity_map(struct device *dev, int startup)
{

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (device_is_rmrr_locked(dev))
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}
static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev,
				  hw ? CONTEXT_TT_PASS_THROUGH :
				       CONTEXT_TT_MULTI_LEVEL);
	if (!ret)
		pr_info("IOMMU: %s identity mapping for device %s\n",
			hw ? "hardware" : "software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;

			adev = to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
	/*
	 * Start from the sane iommu hardware state.
	 * If the queued invalidation is already initialized by us
	 * (for example, while enabling interrupt-remapping) then
	 * we got the things already rolling from a sane state.
	 */
	if (!iommu->qi) {
		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	if (dmar_enable_qi(iommu)) {
		/*
		 * Queued Invalidate not enabled, use Register Based Invalidate
		 */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
		pr_info("IOMMU: %s using Register based invalidation\n",
			iommu->name);
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
		pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
	}
}
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct device *dev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read-only
		 */
		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    DMAR_UNITS_SUPPORTED);
	}

	/* Preallocate enough resources for IOMMU hot-addition */
	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
		g_num_of_iommus = DMAR_UNITS_SUPPORTED;

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret)
			goto free_iommu;
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	for_each_active_iommu(iommu, drhd)
		intel_iommu_init_qi(iommu);

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		/* some BIOSes list non-existent devices in the DMAR table */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, dev) {
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd) {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, dev_name(dev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR "Allocating domain for %s failed",
		       dev_name(dev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(dev))) {
		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR "Domain context map for %s failed",
			       dev_name(dev));
			return NULL;
		}
	}

	return domain;
}
static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}
static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
/* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
	int found;

	if (iommu_dummy(dev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(dev);
	if (found) {
		if (iommu_should_identity_map(dev, 0))
			return 1;
		else {
			/*
			 * A 32-bit DMA device is removed from si_domain and
			 * falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, dev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       dev_name(dev));
			return 0;
		}
	} else {
		/*
		 * If a 64-bit DMA device was detached from a VM, put it back
		 * into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(dev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, dev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       dev_name(dev));
				return 1;
			}
		}
	}

	return 0;
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might cover only part of a page, but we
	 * should map the whole page.  Note: if two parts of one page are
	 * separately mapped, we might get two guest_addr mappings to the same
	 * host paddr, but this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova_size(iova),
					!deferred_flush[i].freelist[j], 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
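/* Timer callback; fires 10ms after the first deferred unmap is queued. */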
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	intel_unmap(dev, dev_addr);
}
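/*
 * Coherent allocations first try the CMA area via
 * dma_alloc_from_contiguous() when the caller can sleep, falling back to
 * plain alloc_pages().  For devices that bypass translation the coherent
 * mask is re-checked, since no IOMMU remapping will fix up a page that
 * landed above it.
 */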
static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	struct page *page = NULL;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(dev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
					 DMA_BIDIRECTIONAL,
					 dev->coherent_dma_mask);
	if (*dma_handle)
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

	return NULL;
}
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;
	struct page *page = virt_to_page(vaddr);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap(dev, dma_handle);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
}
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	intel_unmap(dev, sglist[0].dma_address);
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,

					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
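/*
 * Suspend/resume support: iommu_suspend() saves the fault-event control,
 * data and address registers of every active IOMMU and disables translation;
 * iommu_resume() re-runs init_iommu_hw() and writes the saved register state
 * back.  Both are registered as syscore ops so they bracket the whole S3
 * transition.
 */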
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
						 GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_PM */
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}
int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by ACPI _DSM method, we need to
	 * copy the memory content because the memory buffer will be freed
	 * on return.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}
static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}
int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("IOMMU: %s doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("IOMMU: %s doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * we always have to disable PMRs or DMA may fail on this device
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	if (si_domain) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0 || si_domain->id != ret)
			goto disable_iommu;
		domain_attach_iommu(si_domain, iommu);
	}

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}
static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}
/*
 * Here we only respond to the action of a device being unbound from its
 * driver.
 *
 * A newly added device is not attached to its DMAR domain here; that happens
 * when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("dmar: failed get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
					       iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo, iova_size(iova),
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}
static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (info->iommu == iommu)
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);

			if (!domain_type_is_vm_or_si(old_domain) &&
			     list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain_remove_one_dev_info(dmar_domain, dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}
static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}
static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
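/*
 * GGC is the graphics control register in the GMCH configuration space; the
 * bit names below follow the chipset datasheet.  quirk_calpella_no_shadow_gtt()
 * reads it to see whether the BIOS allocated any VT-enabled graphics stolen
 * memory (a shadow GTT) before deciding how to treat the IGD.
 */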
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}