/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/amba/bus.h>

#include <asm/cacheflush.h>
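
/*
 * Default DMA operations for the platform, installed at boot and
 * overridden per device (e.g. by the "dma-coherent" bus notifier below).
 */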
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}
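
/*
 * Coherent allocations come from CMA when CONFIG_DMA_CMA is enabled and
 * from the swiotlb pool otherwise; GFP_DMA is forced for devices whose
 * coherent mask does not cover memory above 32 bits.
 */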
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		phys_addr_t paddr = dma_to_phys(dev, dma_handle);

		dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	} else {
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
	}
}
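
/*
 * Non-coherent allocation: take a coherent buffer, flush any dirty lines
 * from the cacheable kernel alias, then hand out a second, non-cacheable
 * mapping of the same pages created with vmap().
 */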
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	struct page *page, **map;
	void *ptr, *coherent_ptr;
	int order, i;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;
	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
	if (!map)
		goto no_map;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;
	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
	kfree(map);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}
static void __dma_free_noncoherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	vunmap(vaddr);
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
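
/*
 * The wrappers below bracket the generic swiotlb operations with the cache
 * maintenance a non-coherent device needs: __dma_map_area before the device
 * touches the buffer and __dma_unmap_area before the CPU reads it back
 * (both implemented in arch/arm64/mm/cache.S).
 */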
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	for_each_sg(sgl, sg, ret, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);

	return ret;
}
static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);
}
/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
static int __swiotlb_mmap_noncoherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
static int __swiotlb_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	/* Just use whatever page_prot attributes were specified */
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
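
/*
 * DMA operations for devices that are not I/O coherent: allocations are
 * remapped non-cacheable and every transfer requires explicit cache
 * maintenance.
 */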
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_noncoherent,
	.free = __dma_free_noncoherent,
	.mmap = __swiotlb_mmap_noncoherent,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
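
/*
 * DMA operations for I/O coherent devices: the generic swiotlb entry points
 * are used directly, with no cache maintenance.
 */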
struct dma_map_ops coherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_coherent,
	.free = __dma_free_coherent,
	.mmap = __swiotlb_mmap_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
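
/*
 * Devices that declare themselves coherent via the "dma-coherent"
 * devicetree property are switched to the coherent ops as they are added
 * to the platform or AMBA bus.
 */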
static int dma_bus_notifier(struct notifier_block *nb,
			    unsigned long event, void *_dev)
{
	struct device *dev = _dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;

	if (of_property_read_bool(dev->of_node, "dma-coherent"))
		set_dma_ops(dev, &coherent_swiotlb_dma_ops);

	return NOTIFY_OK;
}
static struct notifier_block platform_bus_nb = {
	.notifier_call = dma_bus_notifier,
};

static struct notifier_block amba_bus_nb = {
	.notifier_call = dma_bus_notifier,
};
extern int swiotlb_late_init_with_default_size(size_t default_size);

static int __init swiotlb_late_init(void)
{
	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);

	/*
	 * These must be registered before of_platform_populate().
	 */
	bus_register_notifier(&platform_bus_type, &platform_bus_nb);
	bus_register_notifier(&amba_bustype, &amba_bus_nb);

	dma_ops = &noncoherent_swiotlb_dma_ops;

	return swiotlb_late_init_with_default_size(swiotlb_size);
}
arch_initcall(swiotlb_late_init);
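
/* Preallocate entries for the dma-debug mapping tracker. */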
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);