#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU
 * cannot reference a dma_addr_t directly because there may be translation
 * between its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     struct dma_attrs *attrs);
	int (*mmap)(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size,
			   struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
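
/*
 * Illustrative sketch, not part of the original header: a minimal
 * dma_map_ops backend for a hypothetical platform whose bus addresses
 * equal physical addresses and whose memory is cache-coherent, so the
 * optional unmap/sync hooks can stay NULL.  All "example_*" names are
 * invented for this illustration.
 */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	return page_to_phys(page) + offset;
}

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir,
			  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	return nents;
}

static struct dma_map_ops example_dma_ops = {
	.map_page	= example_map_page,
	.map_sg		= example_map_sg,
	.is_phys	= 1,
};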

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret)		(0)
#define dma_release_from_coherent(dev, order, vaddr)		(0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret)	(0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of DMA
 * dependent code.  Code that depends on the dma-mapping API needs to
 * set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
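
/*
 * Usage sketch, not part of the original header: mapping a buffer with
 * an explicit attribute set.  DMA_ATTR_SKIP_CPU_SYNC tells the backend
 * that the caller will issue its own dma_sync_*() calls.  The
 * "example_" names are invented for this illustration.
 */
static inline dma_addr_t example_map_no_sync(struct device *dev, void *buf,
					     size_t len)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}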

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
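
/*
 * Usage sketch, not part of the original header: mapping a scatterlist
 * and walking the mapped segments.  The backend may coalesce entries,
 * so the returned count, not the original nents, bounds the walk, while
 * the original nents is what gets passed back to the unmap.
 */
static inline int example_map_sgl(struct device *dev,
				  struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, NULL);
	if (mapped == 0)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* program one hardware descriptor per (addr, len) pair */
		pr_debug("segment %d: %pad + %u\n", i, &addr, len);
	}

	dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, NULL);
	return 0;
}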

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
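
/*
 * Usage sketch, not part of the original header: bouncing ownership of a
 * long-lived streaming mapping between device and CPU for each transfer.
 */
static inline void example_rx_complete(struct device *dev, dma_addr_t handle,
				       void *buf, size_t len)
{
	/* the device has finished writing: hand the buffer to the CPU */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... the CPU may now safely read buf ... */

	/* hand ownership back to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}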

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

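/*
 * Usage sketch, not part of the original header: a character device's
 * mmap path exposing a coherent allocation to user space.  The
 * example_ctx structure and its fields are invented for illustration.
 */
struct example_ctx {
	struct device	*dev;
	void		*cpu_addr;
	dma_addr_t	handle;
	size_t		size;
};

static inline int example_mmap(struct example_ctx *ctx,
			       struct vm_area_struct *vma)
{
	return dma_mmap_coherent(ctx->dev, vma, ctx->cpu_addr,
				 ctx->handle, ctx->size);
}
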
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
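
/*
 * Usage sketch, not part of the original header: a zeroed, coherent
 * descriptor ring.  The CPU uses the returned kernel address while the
 * device is programmed with the dma_addr_t written through *handle.
 */
static inline void *example_alloc_ring(struct device *dev, size_t size,
				       dma_addr_t *handle)
{
	void *ring = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);

	if (ring)
		memset(ring, 0, size);	/* or use dma_zalloc_coherent() */
	return ring;
}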

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}
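
/*
 * Usage sketch, not part of the original header: the canonical check
 * after a streaming map; a handle that fails dma_mapping_error() must
 * not be used or passed to dma_unmap_single().
 */
static inline int example_checked_map(struct device *dev, void *buf,
				      size_t len, dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}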

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or a smaller value than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

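/*
 * Usage sketch, not part of the original header: typical probe-time mask
 * negotiation, preferring 64-bit addressing with a 32-bit fallback.
 */
static inline int example_set_masks(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) == 0)
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
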
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev,
				       unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

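/*
 * Usage sketch, not part of the original header: a bus driver publishing
 * its DMA constraints.  dev->dma_parms must point at caller-owned storage
 * (struct device_dma_parameters from <linux/device.h>) before the setters
 * can succeed.
 */
static inline int example_set_parms(struct device *dev,
				    struct device_dma_parameters *parms)
{
	dev->dma_parms = parms;
	if (dma_set_max_seg_size(dev, SZ_1M))
		return -EIO;
	return dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
}
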
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

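/*
 * Usage sketch, not part of the original header: probe-time use of the
 * managed variant; devres frees the allocation automatically on driver
 * detach, so the error and remove paths need no explicit free.
 */
static inline int example_probe(struct device *dev)
{
	dma_addr_t handle;
	void *ring;

	ring = dmam_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	/* ... register the device; "ring" lives until driver detach ... */
	return 0;
}
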
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev,
					struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr,
					size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

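/*
 * Usage sketch, not part of the original header: write-combined memory
 * for a hypothetical frame buffer.  All three helpers imply the same
 * DMA_ATTR_WRITE_COMBINE attribute, so the alloc, mmap and free calls
 * must be drawn from this family together.
 */
static inline void *example_alloc_fb(struct device *dev, size_t size,
				     dma_addr_t *handle)
{
	/* pair with dma_mmap_writecombine()/dma_free_writecombine() */
	return dma_alloc_writecombine(dev, size, handle, GFP_KERNEL);
}
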
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

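/*
 * Usage sketch, not part of the original header: per-request unmap state
 * that compiles away entirely when CONFIG_NEED_DMA_MAP_STATE is not set.
 * The example_request structure is invented for illustration.
 */
struct example_request {
	void	*buf;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

static inline void example_save_state(struct example_request *req,
				      dma_addr_t addr, size_t len)
{
	dma_unmap_addr_set(req, mapping, addr);
	dma_unmap_len_set(req, len, len);
}

static inline void example_complete(struct device *dev,
				    struct example_request *req)
{
	dma_unmap_single(dev, dma_unmap_addr(req, mapping),
			 dma_unmap_len(req, len), DMA_TO_DEVICE);
}
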
#endif /* _LINUX_DMA_MAPPING_H */