x86: move ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY to dma-mapping.h
include/asm-x86/dma-mapping.h
#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;

struct dma_mapping_ops {
	int (*mapping_error)(dma_addr_t dma_addr);
	void* (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int (*dma_supported)(struct device *hwdev, u64 mask);
	int is_phys;
};

extern const struct dma_mapping_ops *dma_ops;

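/*
 * Illustrative sketch only (not part of this header): a DMA backend such
 * as a software IOMMU provides its own operations table and points the
 * global dma_ops pointer at it during setup.  All "example_*" names below
 * are hypothetical; the real providers live under arch/x86.
 *
 *	static const struct dma_mapping_ops example_dma_ops = {
 *		.mapping_error	= example_mapping_error,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *		.is_phys	= 1,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */
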
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle);

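/*
 * Example usage (illustrative only): allocate a buffer that both the CPU
 * and the device can access without explicit sync calls, e.g. a descriptor
 * ring.  "dev" and "ring_size" are hypothetical driver variables.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */
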
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

#ifdef CONFIG_X86_32
# include "dma-mapping_32.h"
#else
# include "dma-mapping_64.h"
#endif

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, addr, size, direction);
}

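/*
 * Example usage (illustrative only): map a kernel buffer for a single
 * streaming transfer, check the mapping, and unmap it once the device has
 * finished.  "dev", "buf" and "len" are hypothetical driver variables.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
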
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_sg)
		dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
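
/*
 * Example usage (illustrative only): map a scatter-gather list.  The
 * returned count may be smaller than nents if entries were merged, and a
 * return of zero means the mapping failed.  Unmapping always uses the
 * original nents, not the returned count.
 *
 *	int count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	... program the device with the "count" mapped segments ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */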

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

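/*
 * Example usage (illustrative only): a driver that keeps a streaming
 * mapping alive across several transfers hands ownership back and forth
 * with the sync calls instead of remapping the buffer each time.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now look at the data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffer again ...
 */
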
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(dev, page_to_phys(page)+offset,
				   size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

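/*
 * Example usage (illustrative only): map a region inside a struct page,
 * e.g. payload sitting in a page cache page at a known offset.  "dev",
 * "page", "offset" and "len" are hypothetical driver variables.
 *
 *	handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */
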
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

#ifdef CONFIG_X86_32
# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
#endif /* CONFIG_X86_32 */
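
/*
 * Example usage (illustrative only, 32-bit): a platform with device-local
 * memory at a fixed bus address can register it so that dma_alloc_coherent()
 * for this device is satisfied from that region.  The addresses and size
 * below are made up; the DMA_MEMORY_* flags come from <linux/dma-mapping.h>.
 *
 *	dma_declare_coherent_memory(dev, 0xf8000000, 0xf8000000,
 *				    0x100000, DMA_MEMORY_MAP);
 *	...
 *	dma_release_declared_memory(dev);
 */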
#endif