x86: Fix ASM_X86__ header guards
[deliverable/linux.git] / arch/x86/include/asm/dma-mapping.h
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

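/*
 * Dispatch table filled in by whichever DMA backend is active (e.g. the
 * no-IOMMU path, swiotlb, GART or Calgary).  The inline wrappers further
 * down simply call through the selected ops table.
 */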
struct dma_mapping_ops {
        int (*mapping_error)(struct device *dev,
                             dma_addr_t dma_addr);
        void *(*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                 size_t size, int direction);
        void (*unmap_single)(struct device *dev, dma_addr_t addr,
                             size_t size, int direction);
        void (*sync_single_for_cpu)(struct device *hwdev,
                                    dma_addr_t dma_handle, size_t size,
                                    int direction);
        void (*sync_single_for_device)(struct device *hwdev,
                                       dma_addr_t dma_handle, size_t size,
                                       int direction);
        void (*sync_single_range_for_cpu)(struct device *hwdev,
                                          dma_addr_t dma_handle, unsigned long offset,
                                          size_t size, int direction);
        void (*sync_single_range_for_device)(struct device *hwdev,
                                             dma_addr_t dma_handle, unsigned long offset,
                                             size_t size, int direction);
        void (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void (*sync_sg_for_device)(struct device *hwdev,
                                   struct scatterlist *sg, int nelems,
                                   int direction);
        int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                      int nents, int direction);
        void (*unmap_sg)(struct device *hwdev,
                         struct scatterlist *sg, int nents,
                         int direction);
        int (*dma_supported)(struct device *hwdev, u64 mask);
        int is_phys;
};

extern struct dma_mapping_ops *dma_ops;

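/*
 * 32-bit always uses the single global dma_ops table; on 64-bit a device
 * may carry its own table in dev->archdata.dma_ops (set up by the IOMMU
 * code), with the global table as the fallback.
 */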
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
        return 0;
#else
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_single)
                ops->unmap_single(dev, addr, size, direction);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

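/*
 * The sync_*_for_cpu()/sync_*_for_device() pairs hand ownership of a
 * streaming mapping back and forth between CPU and device without
 * unmapping it; the *_range_* variants below do the same for a
 * sub-range of the mapping.
 */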
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);

        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(dev, page_to_phys(page) + offset,
                               size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

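/*
 * Helpers for dma_alloc_coherent() below: pick the effective coherent
 * mask for the device and, on 64-bit, widen the gfp flags with GFP_DMA32
 * so the allocator hands back pages the device can actually address.
 */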
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
#ifdef CONFIG_X86_64
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        return ops->alloc_coherent(dev, size, dma_handle,
                                   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}
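
/*
 * Coherent allocation sketch (illustrative only; "dev", "ring" and
 * "ring_bus" are hypothetical driver variables):
 *
 *      dma_addr_t ring_bus;
 *      void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...use ring/ring_bus for descriptor rings etc...
 *      dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
 */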

#endif /* _ASM_X86_DMA_MAPPING_H */