arch/sparc/include/asm/dma-mapping.h
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/dma-mapping_64.h>
#else
#include <asm/dma-mapping_32.h>
#endif

/* An all-ones dma_addr_t marks a failed mapping. */
#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}
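
/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * must check every streaming mapping before handing the address to
 * hardware. "dev", "buf" and "len" are assumed driver-side names.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */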

static inline int dma_get_cache_alignment(void)
{
	/*
	 * no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}
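
/*
 * Illustrative sketch (assumed driver context): round a buffer size up
 * to the reported alignment so a DMA buffer never shares a cache line
 * with unrelated data. "len" is an assumed name.
 *
 *	size_t sz = ALIGN(len, dma_get_cache_alignment());
 *	void *buf = kmalloc(sz, GFP_KERNEL);
 */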

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)
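
/*
 * Sketch (illustrative; "dev" is an assumed device pointer): since the
 * noncoherent calls alias the coherent ones here, both paths behave the
 * same on sparc.
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (cpu) {
 *		... device and CPU share the buffer coherently ...
 *		dma_free_coherent(dev, PAGE_SIZE, cpu, handle);
 *	}
 */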

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, dir);
}
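
/*
 * Sketch (illustrative only): sync just the slice of a streaming
 * mapping the CPU is about to read, then hand it back to the device.
 * "dev", "handle", "off" and "n" are assumed names.
 *
 *	dma_sync_single_range_for_cpu(dev, handle, off, n, DMA_FROM_DEVICE);
 *	... CPU reads the bytes at [off, off + n) ...
 *	dma_sync_single_range_for_device(dev, handle, off, n, DMA_FROM_DEVICE);
 */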

#endif /* ___ASM_SPARC_DMA_MAPPING_H */