arch/arm64/mm/dma-mapping.c
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/amba/bus.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

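/*
 * Select the pgprot for a CPU mapping of a DMA buffer: use Normal
 * non-cacheable (write-combine) attributes when the device is not
 * cache-coherent or when DMA_ATTR_WRITE_COMBINE was requested.
 */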
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}

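/*
 * Allocate a buffer the device can address: from CMA when available,
 * otherwise from the swiotlb pool. ZONE_DMA is forced for devices that
 * cannot address more than 32 bits.
 */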
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

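/*
 * Counterpart of __dma_alloc_coherent(): return the pages to CMA or to
 * the swiotlb pool, whichever provided them.
 */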
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		phys_addr_t paddr = dma_to_phys(dev, dma_handle);

		dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	} else {
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
	}
}

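/*
 * For non-coherent devices the cacheable linear-map alias cannot be
 * handed out directly: allocate backing pages, clean any dirty lines on
 * the linear alias, then remap the pages with non-cacheable attributes
 * via vmap() and return that alias instead.
 */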
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	struct page *page, **map;
	void *ptr, *coherent_ptr;
	int order, i;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;
	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
	if (!map)
		goto no_map;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;
	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
	kfree(map);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

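/*
 * Undo __dma_alloc_noncoherent(): tear down the non-cacheable alias,
 * then free the underlying pages through __dma_free_coherent(). The
 * linear-map address is recovered from the DMA handle.
 */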
static void __dma_free_noncoherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	vunmap(vaddr);
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

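/*
 * Streaming DMA helpers for non-coherent devices: each wraps the generic
 * swiotlb operation and adds the required cache maintenance, cleaning
 * (__dma_map_area) when a buffer is handed to the device and
 * invalidating (__dma_unmap_area) when it is handed back to the CPU.
 *
 * Illustrative sketch only (driver-side names such as "buf" and "len"
 * are hypothetical): drivers never call these directly; the DMA API
 * dispatches here via the installed dma_map_ops:
 *
 *	dma_addr_t d = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...device performs the transfer...
 *	dma_unmap_single(dev, d, len, DMA_TO_DEVICE);
 */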
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	for_each_sg(sgl, sg, ret, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

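/*
 * The sync operations mirror map/unmap: before the CPU touches the
 * buffer, stale cache lines are invalidated (__dma_unmap_area); after
 * the CPU has filled it for the device, dirty lines are written back
 * (__dma_map_area). The cache maintenance is ordered against the
 * swiotlb bounce-buffer copy accordingly.
 */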
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);
}

/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap_noncoherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

static int __swiotlb_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	/* Just use whatever page_prot attributes were specified */
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

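/*
 * dma_map_ops for devices that are not cache-coherent: every operation
 * pairs the generic swiotlb implementation with explicit cache
 * maintenance, and coherent allocations are remapped non-cacheable.
 *
 * Illustrative sketch only ("pdev" is a hypothetical platform device):
 * once these ops are installed, the usual DMA API works unchanged:
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, SZ_4K, &handle,
 *				       GFP_KERNEL);
 *	if (cpu) {
 *		...program the device with "handle"...
 *		dma_free_coherent(&pdev->dev, SZ_4K, cpu, handle);
 *	}
 */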
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_noncoherent,
	.free = __dma_free_noncoherent,
	.mmap = __swiotlb_mmap_noncoherent,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);

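/*
 * dma_map_ops for cache-coherent devices: the generic swiotlb
 * implementations are used directly, with no extra cache maintenance.
 */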
struct dma_map_ops coherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_coherent,
	.free = __dma_free_coherent,
	.mmap = __swiotlb_mmap_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);

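/*
 * Devices are non-coherent by default; a device that declares itself
 * coherent in the device tree is switched to the coherent ops when it
 * is added to its bus.
 *
 * Illustrative device-tree fragment only (node name, compatible string
 * and addresses are hypothetical; "dma-coherent" is the standard
 * property tested below):
 *
 *	dma-capable@10000000 {
 *		compatible = "vendor,some-device";
 *		reg = <0x10000000 0x1000>;
 *		dma-coherent;
 *	};
 */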
static int dma_bus_notifier(struct notifier_block *nb,
			    unsigned long event, void *_dev)
{
	struct device *dev = _dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;

	if (of_property_read_bool(dev->of_node, "dma-coherent"))
		set_dma_ops(dev, &coherent_swiotlb_dma_ops);

	return NOTIFY_OK;
}

static struct notifier_block platform_bus_nb = {
	.notifier_call = dma_bus_notifier,
};

static struct notifier_block amba_bus_nb = {
	.notifier_call = dma_bus_notifier,
};

extern int swiotlb_late_init_with_default_size(size_t default_size);

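/*
 * Set up the swiotlb bounce buffer, at most 64MB and never more than
 * the largest contiguous allocation the page allocator can provide,
 * and install the non-coherent ops as the system-wide default.
 */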
static int __init swiotlb_late_init(void)
{
	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);

	/*
	 * These must be registered before of_platform_populate().
	 */
	bus_register_notifier(&platform_bus_type, &platform_bus_nb);
	bus_register_notifier(&amba_bustype, &amba_bus_nb);

	dma_ops = &noncoherent_swiotlb_dma_ops;

	return swiotlb_late_init_with_default_size(swiotlb_size);
}
arch_initcall(swiotlb_late_init);

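/*
 * Pre-allocate a fixed number of dma-debug tracking entries; with
 * CONFIG_DMA_API_DEBUG enabled this lets the core validate every
 * map/unmap/sync call.
 */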
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);