#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA).  A smaller
   DMA mask would probably be better, but this is bug-to-bug
   compatible with older i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

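/*
 * Set the DMA mask for @dev, after validating it against the active
 * dma_ops via dma_supported().  A typical driver call (illustrative
 * only) looks like:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 */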
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

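/*
 * Reserve a contiguous block below 4GB early in boot; it is freed
 * again by pci_iommu_alloc() so that IOMMU setup can still find a
 * large range under 4GB on machines with memory above MAX_DMA32_PFN.
 */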
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;
	if (end_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See allocate_aperture() in aperture_64.c for why 512M is
	 * used as the allocation goal here.
	 */
	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
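/* Return the early dma32 reservation to bootmem before IOMMU setup. */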
static void __init dma32_free_bootmem(void)
{
	if (end_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}

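/*
 * Early IOMMU detection: probe each supported back end in fall-back
 * order, then set up swiotlb last as the software fallback.
 */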
void __init pci_iommu_alloc(void)
{
	/* free the reservation so the IOMMU can get a range below 4G */
	dma32_free_bootmem();
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons.
	 */
#ifdef CONFIG_GART_IOMMU
	gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
	detect_calgary();
#endif

	detect_intel_iommu();

	amd_iommu_detect();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
#endif

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
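/*
 * For example, "iommu=off" sets no_iommu, "iommu=soft" selects the
 * swiotlb bounce buffers, and options can be combined with commas,
 * e.g. "iommu=force,nomerge".
 */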
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

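/*
 * Declare a bus-address region as per-device coherent DMA memory.
 * The region is ioremap()ed and handed out page by page through a
 * bitmap allocator; dma_alloc_coherent() consults it first (see
 * dma_alloc_from_coherent_mem() below).
 */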
#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

	pages >>= PAGE_SHIFT;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
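/*
 * Try to satisfy the allocation from the per-device coherent area set
 * up by dma_declare_coherent_memory(), if the device has one.  Returns
 * nonzero when the device has such an area, in which case the caller
 * must not fall back to the normal allocator.
 */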
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}

static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */

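/*
 * Check whether @dev can address memory described by @mask.  Note the
 * deliberate zero return in the iommu_sac_force branch below: it
 * steers 64-bit capable devices toward cheaper 32-bit (SAC)
 * addressing when the IOMMU is forced.
 */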
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
		       dev->bus_id);
		return 0;
	}
#endif

	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
		       dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

/* Allocate DMA memory on node near device */
static noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}

/*
 * Allocate memory for a coherent mapping.
 */
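/*
 * Strategy: first try the per-device coherent area, then the page
 * allocator in a zone matching the device's coherent_dma_mask
 * (retrying with GFP_DMA if the first attempt lands too high), and
 * finally fall back to the dma_ops alloc_coherent/map_simple hooks
 * so a hardware IOMMU can remap pages the device cannot reach.
 */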
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory = NULL;
	struct page *page;
	unsigned long dma_mask = 0;
	dma_addr_t bus;
	int noretry = 0;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &fallback_dev;
		gfp |= GFP_DMA;
	}
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
	if (gfp & __GFP_DMA)
		noretry = 1;

#ifdef CONFIG_X86_64
	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
		gfp |= GFP_DMA32;
		if (dma_mask < DMA_32BIT_MASK)
			noretry = 1;
	}
#endif

 again:
	page = dma_alloc_pages(dev,
		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
	if (page == NULL)
		return NULL;

	{
		int high, mmu;
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							   dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      (unsigned long)size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);
	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

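/*
 * Late initialization, run as an fs_initcall after the PCI subsystem:
 * initialize the back ends probed in pci_iommu_alloc(), again in
 * fall-back order, with no_iommu_init() as the final catch-all.
 */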
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

	intel_iommu_init();

	amd_iommu_init();

#ifdef CONFIG_GART_IOMMU
	gart_iommu_init();
#endif

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. "
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif