#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
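
/*
 * Illustrative sketch (not part of this header): a driver typically keeps
 * the dma_addr_t next to the CPU pointer and hands only the dma_addr_t to
 * the hardware, e.g. by writing it into a descriptor register.  The names
 * my_desc, my_dev_queue_rx, ioaddr and RX_DESC_* are hypothetical.
 *
 *	struct my_desc {
 *		void		*cpu;	// the CPU dereferences this
 *		dma_addr_t	dma;	// the device dereferences this
 *	};
 *
 *	static void my_dev_queue_rx(struct my_desc *d)
 *	{
 *		writel(lower_32_bits(d->dma), ioaddr + RX_DESC_LO);
 *		writel(upper_32_bits(d->dma), ioaddr + RX_DESC_HI);
 *	}
 */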
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
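/*
 * Worked example: DMA_BIT_MASK(32) evaluates to 0x00000000ffffffffULL, the
 * largest address a 32-bit-capable device can generate.  The (n) == 64
 * special case avoids the undefined behaviour of shifting a 64-bit value
 * by 64 bits.
 */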

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for dma allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the dma api to allow compilation but not linking of
 * dma dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
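
/*
 * Illustrative sketch (not part of this header): mapping a scatterlist for
 * device reads via the dma_map_sg() wrapper defined below.  The returned
 * count may be smaller than nents (an IOMMU may merge entries) and is what
 * must be programmed into the device; dma_unmap_sg() takes the original
 * nents.  "sgl" and "nents" are assumed to come from an initialised
 * sg_table.
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	// ...program the device with "count" entries...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	// original nents!
 */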

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
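
/*
 * Illustrative sketch (not part of this header): the usual streaming DMA
 * pattern using the NULL-attrs wrappers above.  Every mapping must be
 * checked with dma_mapping_error() before use and unmapped with the same
 * size and direction.  "buf" and "len" are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	// ...tell the device to read "len" bytes from "handle"...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */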

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
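
/*
 * Illustrative sketch (not part of this header): a typical fops->mmap
 * implementation exposing a coherent buffer to user space.  The fields of
 * the hypothetical "struct my_ctx" are assumed to hold the results of an
 * earlier dma_alloc_coherent() call.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return dma_mmap_coherent(ctx->dev, vma, ctx->cpu_addr,
 *					 ctx->dma_handle, ctx->size);
 *	}
 */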

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
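
/*
 * Illustrative sketch (not part of this header): a coherent allocation
 * lives until explicitly freed, so both the CPU address and the dma_addr_t
 * must be kept, and the buffer must be freed with the same size that was
 * requested.
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	// ...CPU and device may access the buffer without further syncs...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 */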

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
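
/*
 * Illustrative sketch (not part of this header): typical probe()-time use.
 * Ask for the widest mask the device supports and fall back if the
 * platform cannot provide it; 64 and 32 here are device-specific
 * assumptions.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// no usable DMA addressing
 */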

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif /* CONFIG_HAS_DMA */

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
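
/*
 * Illustrative sketch (not part of this header): drivers embed the unmap
 * state in their own structures so the fields compile away when
 * CONFIG_NEED_DMA_MAP_STATE is not set.  "struct my_tx_slot" and "slot"
 * are hypothetical.
 *
 *	struct my_tx_slot {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	// at map time:
 *	dma_unmap_addr_set(slot, addr, handle);
 *	dma_unmap_len_set(slot, len, size);
 *	// at completion time:
 *	dma_unmap_single(dev, dma_unmap_addr(slot, addr),
 *			 dma_unmap_len(slot, len), DMA_TO_DEVICE);
 */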

#endif