/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
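/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a typical streaming DMA cycle showing the two ownership states above.
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...			(device owns the buffer; the CPU must not
 *				 touch it until it is unmapped or synced)
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 *	...			(CPU owns the buffer again)
 */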
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
#ifdef CONFIG_MMU

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}
static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};

static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
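/*
 * For example (hypothetical command line), booting with "coherent_pool=1M"
 * makes the memparse() call above size the atomic pool at 1 MiB instead
 * of DEFAULT_DMA_COHERENT_POOL_SIZE.
 */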
void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool.vaddr);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool.size = size;
}
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;

	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
					      atomic_pool_init);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
					   atomic_pool_init);
	if (ptr) {
		int i;

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
		       (unsigned)pool->size / 1024);
		return 0;
	}

	kfree(pages);
no_pages:
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}
void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}
static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, PAGE_KERNEL);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}
#define nommu()	0

#else	/* !CONFIG_MMU */

#define nommu()	1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!dev_get_cma_area(dev))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}
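/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * drivers normally reach arm_dma_alloc()/arm_dma_free() through the
 * generic DMA API rather than calling them directly:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, bus);
 */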
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
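/*
 * Illustrative sketch (hypothetical driver code): a scatterlist is mapped
 * through the generic API, which dispatches here via get_dma_ops(dev):
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (!count)
 *		return -ENOMEM;
 *	...	(program the device using sg_dma_address()/sg_dma_len()
 *		 of each mapped entry)
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */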
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
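/*
 * For example (hypothetical driver code), a device that can only drive
 * 26 address bits would announce that limit with:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(26)))
 *		return -EIO;
 */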
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
	{
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/*
	 * IOMMU can map any pages, so himem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (j--)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}
*dev
, struct page
**pages
,
1246 size_t size
, struct dma_attrs
*attrs
)
1248 int count
= size
>> PAGE_SHIFT
;
1249 int array_size
= count
* sizeof(struct page
*);
1252 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS
, attrs
)) {
1253 dma_release_from_contiguous(dev
, pages
[0], count
);
1255 for (i
= 0; i
< count
; i
++)
1257 __free_pages(pages
[i
], 0);
1260 if (array_size
<= PAGE_SIZE
)
/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}
/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
	struct dma_pool *pool = &atomic_pool;
	struct page **pages = pool->pages;
	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;

	return pages + offs;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (!(gfp & __GFP_WAIT))
		return __iommu_alloc_atomic(dev, size, handle);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	int prot;
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}
	return prot;
}
/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_direction_to_prot(dir);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}
/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}
/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
				GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps > mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &iommu_ops);

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
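/*
 * Illustrative sketch (hypothetical bus/driver code): create a 128 MiB IO
 * address space starting at 0x80000000 and attach a client device to it;
 * the base and size values here are examples only:
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 */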
/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	dev->archdata.mapping = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

#endif /* CONFIG_ARM_DMA_USE_IOMMU */