sparc: remove dma-mapping_{32|64}.h
arch/sparc/include/asm/dma-mapping.h
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);

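dma_supported() and dma_set_mask() are implemented out of line in the arch code; a driver typically calls dma_set_mask() from its probe routine before creating any mappings. The sketch below is illustrative only: mydrv_probe and the 32-bit mask are assumptions, and DMA_BIT_MASK() comes from <linux/dma-mapping.h>.

static int mydrv_probe(struct device *dev)
{
	/* Declare that this device can only address 32 bits of DMA space. */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -ENODEV;	/* the platform cannot satisfy this mask */
	return 0;
}
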
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

/* Backend DMA operations; the platform installs the implementation at
 * runtime through the dma_ops pointer below. */
struct dma_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t dma_handle);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction direction);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
			   size_t size,
			   enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nhwentries,
			 enum dma_data_direction direction);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction direction);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction direction);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
				int nelems,
				enum dma_data_direction direction);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
};
extern const struct dma_ops *dma_ops;

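All of the inline wrappers below dispatch through this single ops table. A hedged sketch of how a bus backend might plug in; the mybus_* names are invented for illustration and only one hook is shown.

static void *mybus_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flag)
{
	/* A real backend would return IOMMU-mapped, coherent memory here. */
	return NULL;
}

static const struct dma_ops mybus_dma_ops = {
	.alloc_coherent	= mybus_alloc_coherent,
	/* the remaining hooks would be filled in likewise */
};

const struct dma_ops *dma_ops = &mybus_dma_ops;
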
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
				 direction);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_ops->unmap_page(dev, dma_addr, size, direction);
}

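As a usage sketch (not part of this header): a streaming mapping made with dma_map_single() should be checked with dma_mapping_error(), defined further down, and balanced by dma_unmap_single() with the same size and direction. The device, buffer, and length names here are assumptions.

static int mydrv_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... point the hardware at "handle" and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
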
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_ops->map_page(dev, page, offset, size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_ops->unmap_page(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction)
{
	dma_ops->unmap_sg(dev, sg, nents, direction);
}

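dma_map_sg() may return fewer mapped segments than it was handed (a backend can coalesce entries), so the walk over DMA addresses must be bounded by the return value, while dma_unmap_sg() still takes the original nents. A sketch under those assumptions; hw_fill_desc() is a hypothetical device-specific helper.

static int mydrv_map_and_run(struct device *dev, struct scatterlist *sg,
			     int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
	if (count == 0)
		return -ENOMEM;

	/* One hardware descriptor per mapped segment. */
	for_each_sg(sg, s, count, i)
		hw_fill_desc(sg_dma_address(s), sg_dma_len(s));

	/* ... run the transfer, then tear the mapping down ... */
	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
	return 0;
}
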
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction direction)
{
	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction direction)
{
	/* The for_device hooks are optional; backends that need no sync
	 * in that direction leave them NULL. */
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(dev, dma_handle, size,
						direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction direction)
{
	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction direction)
{
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, dir);
}

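The sync helpers transfer ownership of a still-mapped streaming buffer: the *_for_cpu variants before the CPU looks at the data, the *_for_device variants before the device touches it again. A minimal sketch, reusing the hypothetical handle/len from the dma_map_single() example above:

static void mydrv_peek(struct device *dev, dma_addr_t handle, size_t len)
{
	/* Device wrote the buffer; give it to the CPU to read. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect the data through the CPU mapping here ... */

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
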
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#endif