/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
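/*
 * Illustrative sketch of the driver-visible sequence behind these ownership
 * transitions (generic streaming DMA API, not a function in this file):
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... the device now owns the buffer; start the transfer ...
 *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 *	... the CPU owns the buffer again ...
 *
 * The two helpers declared above perform the cache maintenance behind those
 * transitions.
 */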
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));

	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
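/*
 * Drivers do not call these ops directly: arch_setup_dma_ops() at the end of
 * this file installs them per device via set_dma_ops(), and they are then
 * reached through the generic dma_map_*()/dma_sync_*() wrappers.
 */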
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
#ifdef CONFIG_MMU

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set the VM_USERMAP flag too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
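/*
 * For example, booting with "coherent_pool=2M" on the kernel command line
 * grows the atomic pool to 2 MiB; memparse() accepts the usual K/M/G
 * suffixes.
 */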
void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;

	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				(void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zd KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}
void __init dma_contiguous_remap(void)
{
	int i;

	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller)
{
	struct page *page;
	void *ptr;

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, PAGE_KERNEL);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu()	0

#else	/* !CONFIG_MMU */

#define nommu()	1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */
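/*
 * Illustrative sketch of how a caller ends up in __get_dma_pgprot() with a
 * write-combining mapping (driver-side code, using the generic DMA attrs
 * API of this kernel generation):
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
 */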
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!dev_get_cma_area(dev))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}
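/*
 * Illustrative sketch: the usual driver entry point into the path above is
 * dma_alloc_coherent()/dma_free_coherent(), e.g.:
 *
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 *
 * which reaches arm_dma_alloc()/arm_dma_free() through the dma_map_ops.
 */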
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
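/*
 * Illustrative sketch: a driver typically exposes such a buffer to user
 * space from its mmap file operation, e.g. (the foo_* names are
 * hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 *
 * dma_mmap_coherent() lands in arm_dma_mmap() via the dma_map_ops.
 */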
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
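/*
 * Illustrative sketch: a caller walks the mapped list with the
 * sg_dma_{address,length} accessors mentioned above, e.g.
 * (program_hw_descriptor() is a hypothetical device-specific helper):
 *
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	struct scatterlist *s;
 *
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */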
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}
/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}
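/*
 * Worked example of the address arithmetic above (illustrative values only):
 * with mapping->base = 0x80000000, mapping->bits = 0x8000 and 4 KiB pages,
 * mapping_size is 0x8000000, so an allocation placed in bitmap i = 1 at
 * bit start = 3 yields 0x80000000 + 0x8000000 + (3 << 12) = 0x88003000.
 */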
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}
*dev
, struct page
**pages
,
1171 size_t size
, struct dma_attrs
*attrs
)
1173 int count
= size
>> PAGE_SHIFT
;
1174 int array_size
= count
* sizeof(struct page
*);
1177 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS
, attrs
)) {
1178 dma_release_from_contiguous(dev
, pages
[0], count
);
1180 for (i
= 0; i
< count
; i
++)
1182 __free_pages(pages
[i
], 0);
1185 if (array_size
<= PAGE_SIZE
)
/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	return dma_common_pages_remap(pages, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
}
/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (!(gfp & __GFP_WAIT))
		return __iommu_alloc_atomic(dev, size, handle);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	int prot;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}

	return prot;
}
/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_direction_to_prot(dir);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}
/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}
/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with
 * the arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
				GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
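/*
 * Illustrative sketch: a bus or platform driver pairs this with the attach
 * helper below (the base address and size are illustrative values):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 */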
static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps > mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
static void __arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	dev->archdata.mapping = NULL;

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer).
 */
void arm_iommu_detach_device(struct device *dev)
{
	__arm_iommu_detach_device(dev);
	set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	if (!mapping)
		return;

	__arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */
static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
	arm_teardown_iommu_dma_ops(dev);
}