swiotlb: Remove the swiotlb variable usage
[deliverable/linux.git] / arch/x86/kernel/pci-dma.c
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
#include <asm/x86_init.h>
static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
/*
 * Dummy device used for NULL arguments (normally ISA). A smaller DMA mask
 * would probably be better, but this is bug-to-bug compatible with older
 * i386.
 */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = DMA_BIT_MASK(32),
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
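/*
 * Note: the x86 dma-mapping helpers are expected to substitute
 * x86_dma_fallback_dev when a caller passes a NULL device, so legacy
 * (typically ISA) callers still see a sane 32-bit coherent mask.
 */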
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
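/*
 * Illustrative driver usage (not part of this file): a driver typically
 * negotiates the widest mask the platform supports at probe time and
 * falls back to 32 bits, since dma_set_mask() returns -EIO on rejection:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *          dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *              return -ENODEV;
 */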
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
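/*
 * The reservation below defaults to 128M and can be overridden on the
 * kernel command line; memparse() accepts the usual K/M/G suffixes,
 * e.g. "dma32_size=256M". The reserved block is handed back in
 * pci_iommu_alloc() so the IOMMU code can still allocate below 4G.
 */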
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * see aperture_64.c allocate_aperture() for the reason why
         * 512M is used as the goal
         */
        align = 64ULL<<20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                                    512ULL<<20);
        /*
         * Kmemleak should not scan this block as it may not be mapped via the
         * kernel direct mapping.
         */
        kmemleak_ignore(dma32_bootmem_ptr);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}

static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
#endif
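/*
 * Runs early in boot, before the PCI subsystem initializes. On 64-bit
 * the dma32 bootmem block is released first; pci_swiotlb_init() is then
 * expected to return nonzero when the software bounce-buffer path was
 * chosen (e.g. via "iommu=soft"), in which case the hardware IOMMU
 * detection below is skipped.
 */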
void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
#else
        dma_ops = &nommu_dma_ops;
#endif
        if (pci_swiotlb_init())
                return;

        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        /* needs to be called after gart_iommu_hole_init */
        amd_iommu_detect();
}
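/*
 * Generic coherent allocator: grab zeroed pages on the device's node
 * and, if the resulting physical address does not fit the device's
 * coherent mask, retry once from ZONE_DMA before giving up.
 */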
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}
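/*
 * This helper is assumed to be wired up as the ->alloc_coherent hook of
 * the simple dma_map_ops implementations on x86 (e.g. nommu_dma_ops);
 * drivers reach it indirectly through dma_alloc_coherent().
 */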
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
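/*
 * Options are comma-separated and parsed in order, so several can be
 * combined on the command line: e.g. "iommu=pt" enables pass-through
 * mode and "iommu=soft" forces SWIOTLB bounce buffering; see the parser
 * above for the full list.
 */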
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /* Tell the device to use SAC when IOMMU force is on. This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these. Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
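/*
 * Late initialization: sets up DMA-API debugging, then invokes whichever
 * iommu_init hook was installed in x86_init by the detection code above.
 * If SWIOTLB ended up unused, its bounce buffers are released here.
 */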
static int __init pci_iommu_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
        x86_init.iommu.iommu_init();

        if (swiotlb) {
                printk(KERN_INFO "PCI-DMA: "
                       "Using software bounce buffering for IO (SWIOTLB)\n");
                swiotlb_print_info();
        } else
                swiotlb_free();

        return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
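/*
 * Registered as a final-stage PCI fixup below: with PCI_ANY_ID the
 * quirk runs for every VIA device, and the class check restricts the
 * actual DAC disabling to PCI-to-PCI bridges.
 */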
static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif