#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;

struct dma_mapping_ops {
	int (*mapping_error)(struct device *dev,
			     dma_addr_t dma_addr);
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
				 size_t size, int direction);
	void (*unmap_single)(struct device *dev, dma_addr_t addr,
			     size_t size, int direction);
	void (*sync_single_for_cpu)(struct device *hwdev,
				    dma_addr_t dma_handle, size_t size,
				    int direction);
	void (*sync_single_for_device)(struct device *hwdev,
				       dma_addr_t dma_handle, size_t size,
				       int direction);
	void (*sync_single_range_for_cpu)(struct device *hwdev,
					  dma_addr_t dma_handle,
					  unsigned long offset,
					  size_t size, int direction);
	void (*sync_single_range_for_device)(struct device *hwdev,
					     dma_addr_t dma_handle,
					     unsigned long offset,
					     size_t size, int direction);
	void (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void (*sync_sg_for_device)(struct device *hwdev,
				   struct scatterlist *sg, int nelems,
				   int direction);
	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
		      int nents, int direction);
	void (*unmap_sg)(struct device *hwdev,
			 struct scatterlist *sg, int nents,
			 int direction);
	int (*dma_supported)(struct device *hwdev, u64 mask);
	int is_phys;
};
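
/*
 * Illustrative sketch only (not part of this header): a minimal backend
 * would implement a subset of the hooks above and publish itself through
 * the global dma_ops pointer declared below. The example_* names are
 * placeholders, not real kernel symbols.
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= example_map_single,
 *		.map_sg		= example_map_sg,
 *		.is_phys	= 1,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */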

extern struct dma_mapping_ops *dma_ops;

/* 32-bit uses the single global dma_ops; 64-bit may override it per device. */
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
#endif
}
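
/*
 * Sketch of the intended calling pattern ('dev' and 'bus' are hypothetical
 * driver state): a handle must be checked before it is handed to hardware,
 * since on 64-bit a mapping can legitimately fail.
 *
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 */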

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}
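
/*
 * Streaming DMA lifecycle, as a hedged sketch ('dev', 'buf' and 'len' are
 * hypothetical driver state):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		goto err;
 *	... point the device at 'bus' and run the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */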

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
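
/*
 * Scatter-gather sketch (hypothetical 'dev', 'sglist' and 'nents', with
 * 'sglist' prepared via sg_init_table()/sg_set_page()). The return value
 * may be smaller than 'nents' when entries get merged, and dma_unmap_sg()
 * must be passed the original 'nents':
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		goto err;
 *	... program 'count' descriptors into the device ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */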

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
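
/*
 * Sync sketch: letting the CPU look at a still-mapped streaming buffer
 * between device accesses ('dev', 'bus' and 'len' are hypothetical):
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... the device may DMA into it again ...
 */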

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to query the cache line size on all x86
	 * CPUs, so return the maximum possible value to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (ops->alloc_coherent)
		return ops->alloc_coherent(dev, size,
					   dma_handle, gfp);
	return NULL;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
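
/*
 * Coherent allocation sketch (hypothetical 'dev'; a real driver would set
 * its addressing limits with dma_set_mask() beforehand). No sync calls are
 * needed while the buffer is in use:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... share 'cpu' with the CPU and 'bus' with the device ...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
 */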

#endif /* _ASM_DMA_MAPPING_H_ */