Commit | Line | Data |
---|---|---|
62fdd767 FY |
1 | /* |
2 | * Dynamic DMA mapping support. | |
3 | */ | |
4 | ||
5 | #include <linux/types.h> | |
6 | #include <linux/mm.h> | |
7 | #include <linux/string.h> | |
8 | #include <linux/pci.h> | |
9 | #include <linux/module.h> | |
10 | #include <linux/dmar.h> | |
11 | #include <asm/iommu.h> | |
12 | #include <asm/machvec.h> | |
13 | #include <linux/dma-mapping.h> | |
14 | ||
15 | #include <asm/machvec.h> | |
16 | #include <asm/system.h> | |
17 | ||
18 | #ifdef CONFIG_DMAR | |
19 | ||
20 | #include <linux/kernel.h> | |
21 | #include <linux/string.h> | |
22 | ||
23 | #include <asm/page.h> | |
24 | #include <asm/iommu.h> | |
25 | ||
26 | dma_addr_t bad_dma_address __read_mostly; | |
27 | EXPORT_SYMBOL(bad_dma_address); | |
28 | ||
29 | static int iommu_sac_force __read_mostly; | |
30 | ||
31 | int no_iommu __read_mostly; | |
32 | #ifdef CONFIG_IOMMU_DEBUG | |
33 | int force_iommu __read_mostly = 1; | |
34 | #else | |
35 | int force_iommu __read_mostly; | |
36 | #endif | |
37 | ||
38 | /* Set this to 1 if there is a HW IOMMU in the system */ | |
39 | int iommu_detected __read_mostly; | |
40 | ||
41 | /* Dummy device used for NULL arguments (normally ISA). Better would | |
42 | be probably a smaller DMA mask, but this is bug-to-bug compatible | |
43 | to i386. */ | |
44 | struct device fallback_dev = { | |
45 | .bus_id = "fallback device", | |
46 | .coherent_dma_mask = DMA_32BIT_MASK, | |
47 | .dma_mask = &fallback_dev.coherent_dma_mask, | |
48 | }; | |
49 | ||
50 | void __init pci_iommu_alloc(void) | |
51 | { | |
52 | /* | |
53 | * The order of these functions is important for | |
54 | * fall-back/fail-over reasons | |
55 | */ | |
56 | detect_intel_iommu(); | |
57 | ||
58 | #ifdef CONFIG_SWIOTLB | |
59 | pci_swiotlb_init(); | |
60 | #endif | |
61 | } | |
62 | ||
63 | static int __init pci_iommu_init(void) | |
64 | { | |
65 | if (iommu_detected) | |
66 | intel_iommu_init(); | |
67 | ||
68 | return 0; | |
69 | } | |
70 | ||
71 | /* Must execute after PCI subsystem */ | |
72 | fs_initcall(pci_iommu_init); | |
73 | ||
/* Nothing to tear down on this platform; kept for the common interface. */
void pci_iommu_shutdown(void)
{
}
78 | ||
79 | void __init | |
80 | iommu_dma_init(void) | |
81 | { | |
82 | return; | |
83 | } | |
84 | ||
/* Active DMA mapping operations for this platform (set during boot). */
struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
87 | ||
88 | int iommu_dma_supported(struct device *dev, u64 mask) | |
89 | { | |
90 | struct dma_mapping_ops *ops = get_dma_ops(dev); | |
91 | ||
62fdd767 FY |
92 | if (ops->dma_supported_op) |
93 | return ops->dma_supported_op(dev, mask); | |
94 | ||
95 | /* Copied from i386. Doesn't make much sense, because it will | |
96 | only work for pci_alloc_coherent. | |
97 | The caller just has to use GFP_DMA in this case. */ | |
98 | if (mask < DMA_24BIT_MASK) | |
99 | return 0; | |
100 | ||
101 | /* Tell the device to use SAC when IOMMU force is on. This | |
102 | allows the driver to use cheaper accesses in some cases. | |
103 | ||
104 | Problem with this is that if we overflow the IOMMU area and | |
105 | return DAC as fallback address the device may not handle it | |
106 | correctly. | |
107 | ||
108 | As a special case some controllers have a 39bit address | |
109 | mode that is as efficient as 32bit (aic79xx). Don't force | |
110 | SAC for these. Assume all masks <= 40 bits are of this | |
111 | type. Normally this doesn't make any difference, but gives | |
112 | more gentle handling of IOMMU overflow. */ | |
113 | if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) { | |
114 | dev_info(dev, "Force SAC with mask %lx\n", mask); | |
115 | return 0; | |
116 | } | |
117 | ||
118 | return 1; | |
119 | } | |
120 | EXPORT_SYMBOL(iommu_dma_supported); | |
121 | ||
122 | #endif |