/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"
struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
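/*
 * Illustrative sketch (not part of the original file): a typical
 * driver-side sequence that exercises these ownership transitions via
 * the generic DMA API.  queue_for_dma() and wait_for_device() are
 * hypothetical placeholders:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;			/* CPU still owns the buffer */
 *	queue_for_dma(dev, dma, PAGE_SIZE);	/* device owns it now */
 *	wait_for_device(dev);
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 *						/* ownership back with the CPU */
 */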
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
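/*
 * Illustrative note (not part of the original file): drivers never call
 * these hooks directly; the generic DMA API dispatches through the
 * device's dma_map_ops, roughly:
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t dma = ops->map_page(dev, page, offset, size, dir, NULL);
 *
 * which for a non-coherent ARM device resolves to arm_dma_map_page().
 */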
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
#ifdef CONFIG_MMU

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
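/*
 * Example (boot-time usage): the atomic pool size can be overridden on
 * the kernel command line, e.g.
 *
 *	coherent_pool=4M
 *
 * memparse() accepts the usual K/M/G suffixes.
 */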
void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;

	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				(void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}
void __init dma_contiguous_remap(void)
{
	int i;

	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}
static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
#define __free_from_pool(cpu_addr, size)			do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */
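/*
 * Illustrative sketch (not part of the original file): how a driver
 * would request a write-combining kernel mapping through the
 * __get_dma_pgprot() helper above, using the dma_attrs interface of
 * this kernel generation:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
 *
 * arm_dma_alloc() then derives the final pgprot via __get_dma_pgprot().
 */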
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}
static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		__dma_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 struct dma_attrs *attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_ERROR_CODE;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (nommu() || is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}
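/*
 * Illustrative sketch (not part of the original file): the common entry
 * point into this allocator is dma_alloc_coherent(), e.g.
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 */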
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}
static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
#endif	/* CONFIG_MMU */
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
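/*
 * Illustrative sketch (not part of the original file): driver-side use
 * of the scatter-gather mapping above, assuming a previously populated
 * scatterlist 'sgl' with 'nents' entries; program_hw_descriptor() is a
 * hypothetical placeholder:
 *
 *	struct scatterlist *s;
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */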
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}
/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
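/*
 * Illustrative sketch (not part of the original file): a driver for a
 * device that can only address the low 24 bits would typically do
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 *
 * which is checked through arm_dma_set_mask()/__dma_supported() above.
 */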
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so himem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}
/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	return dma_common_pages_remap(pages, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
}
/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(gfp))
		return __iommu_alloc_atomic(dev, size, handle);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
		return -ENXIO;

	pages += off;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	int prot;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}

	return prot;
}
/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_direction_to_prot(dir);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}
/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}
/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}
/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}
/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}
static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
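/*
 * Illustrative sketch (not part of the original file): typical bus/host
 * code pairing the two calls documented above; the base and size values
 * here are arbitrary examples:
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 */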
static void __arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 */
void arm_iommu_detach_device(struct device *dev)
{
	__arm_iommu_detach_device(dev);
	set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	__arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */
static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
	arm_teardown_iommu_dma_ops(dev);
}