Commit | Line | Data |
---|---|---|
62fdd767 FY |
1 | /* |
2 | * Dynamic DMA mapping support. | |
3 | */ | |
4 | ||
5 | #include <linux/types.h> | |
6 | #include <linux/mm.h> | |
7 | #include <linux/string.h> | |
8 | #include <linux/pci.h> | |
9 | #include <linux/module.h> | |
10 | #include <linux/dmar.h> | |
11 | #include <asm/iommu.h> | |
12 | #include <asm/machvec.h> | |
13 | #include <linux/dma-mapping.h> | |
14 | ||
62fdd767 | 15 | |
d3f13810 | 16 | #ifdef CONFIG_INTEL_IOMMU |
62fdd767 FY |
17 | |
18 | #include <linux/kernel.h> | |
62fdd767 FY |
19 | |
20 | #include <asm/page.h> | |
62fdd767 FY |
21 | |
/* Sentinel address handed back when a DMA mapping fails — presumably
   compared against by drivers; TODO confirm against mapping callers. */
dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* When non-zero, iommu_dma_supported() below steers wide-mask devices
   to SAC (32-bit single-address-cycle) addressing. */
static int iommu_sac_force __read_mostly;

/* NOTE(review): set elsewhere when no IOMMU is present/used — confirm. */
int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
/* Debug builds default to forcing DMA through the IOMMU. */
int force_iommu __read_mostly = 1;
#else
int force_iommu __read_mostly;
#endif

/* NOTE(review): presumably selects IOMMU identity/pass-through mode;
   only declared here, consumed elsewhere — confirm. */
int iommu_pass_through;

/* DMA operation table provided by the Intel IOMMU (VT-d) driver;
   installed and patched in pci_iommu_alloc(). */
extern struct dma_map_ops intel_dma_ops;
62fdd767 FY |
37 | |
38 | static int __init pci_iommu_init(void) | |
39 | { | |
40 | if (iommu_detected) | |
41 | intel_iommu_init(); | |
42 | ||
43 | return 0; | |
44 | } | |
45 | ||
46 | /* Must execute after PCI subsystem */ | |
47 | fs_initcall(pci_iommu_init); | |
48 | ||
/*
 * Shutdown hook: nothing IOMMU-specific needs tearing down on this
 * platform, but the symbol must exist for callers of the hook.
 */
void pci_iommu_shutdown(void)
{
	/* intentionally empty */
}
53 | ||
54 | void __init | |
55 | iommu_dma_init(void) | |
56 | { | |
57 | return; | |
58 | } | |
59 | ||
62fdd767 FY |
60 | int iommu_dma_supported(struct device *dev, u64 mask) |
61 | { | |
62fdd767 FY |
62 | /* Copied from i386. Doesn't make much sense, because it will |
63 | only work for pci_alloc_coherent. | |
64 | The caller just has to use GFP_DMA in this case. */ | |
2f4f27d4 | 65 | if (mask < DMA_BIT_MASK(24)) |
62fdd767 FY |
66 | return 0; |
67 | ||
68 | /* Tell the device to use SAC when IOMMU force is on. This | |
69 | allows the driver to use cheaper accesses in some cases. | |
70 | ||
71 | Problem with this is that if we overflow the IOMMU area and | |
72 | return DAC as fallback address the device may not handle it | |
73 | correctly. | |
74 | ||
75 | As a special case some controllers have a 39bit address | |
76 | mode that is as efficient as 32bit (aic79xx). Don't force | |
77 | SAC for these. Assume all masks <= 40 bits are of this | |
78 | type. Normally this doesn't make any difference, but gives | |
79 | more gentle handling of IOMMU overflow. */ | |
50cf156a | 80 | if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) { |
e088a4ad | 81 | dev_info(dev, "Force SAC with mask %llx\n", mask); |
62fdd767 FY |
82 | return 0; |
83 | } | |
84 | ||
85 | return 1; | |
86 | } | |
87 | EXPORT_SYMBOL(iommu_dma_supported); | |
88 | ||
160c1d8e FT |
89 | void __init pci_iommu_alloc(void) |
90 | { | |
91 | dma_ops = &intel_dma_ops; | |
92 | ||
93 | dma_ops->sync_single_for_cpu = machvec_dma_sync_single; | |
94 | dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; | |
95 | dma_ops->sync_single_for_device = machvec_dma_sync_single; | |
96 | dma_ops->sync_sg_for_device = machvec_dma_sync_sg; | |
97 | dma_ops->dma_supported = iommu_dma_supported; | |
160c1d8e FT |
98 | |
99 | /* | |
100 | * The order of these functions is important for | |
101 | * fall-back/fail-over reasons | |
102 | */ | |
103 | detect_intel_iommu(); | |
104 | ||
105 | #ifdef CONFIG_SWIOTLB | |
106 | pci_swiotlb_init(); | |
107 | #endif | |
108 | } | |
109 | ||
62fdd767 | 110 | #endif |