#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;
/* Hooks implemented by the active DMA/IOMMU backend; drivers use the
 * inline wrappers below rather than calling these directly. */
struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
					 void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				      size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, phys_addr_t ptr,
				      size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
					size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
						     dma_addr_t dma_handle,
						     unsigned long offset,
						     size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
							dma_addr_t dma_handle,
							unsigned long offset,
							size_t size,
							int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
					   struct scatterlist *sg, int nelems,
					   int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
					      struct scatterlist *sg,
					      int nelems, int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				  int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				    struct scatterlist *sg, int nents,
				    int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
};

extern const struct dma_mapping_ops *dma_ops;
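/*
 * Usage sketch (illustrative only, not part of this header): an IOMMU
 * backend fills in a struct dma_mapping_ops and points dma_ops at it.
 * The backend and the helper nommu_map_single() below are hypothetical;
 * real backends (swiotlb, GART, ...) live in their own source files.
 *
 *	static dma_addr_t nommu_map_single(struct device *hwdev,
 *					   phys_addr_t paddr, size_t size,
 *					   int direction)
 *	{
 *		return paddr;	// identity mapping, no IOMMU involved
 *	}
 *
 *	static const struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= nommu_map_single,
 *	};
 *
 *	// in early init code:
 *	// dma_ops = &example_dma_ops;
 */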
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
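/*
 * Usage sketch (illustrative, not part of this header): a driver that
 * needs a buffer visible to both CPU and device without explicit sync
 * calls allocates it with dma_alloc_coherent().  The device pointer
 * "dev" and the 4 KiB size are arbitrary example values.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... program ring_dma into the device, access "ring" from the CPU ...
 *	dma_free_coherent(dev, 4096, ring, ring_dma);
 */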
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
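/*
 * Usage sketch (illustrative): before doing any DMA, a driver tells the
 * core how many address bits its device can drive; dma_set_mask() fails
 * if no mapping scheme can satisfy the mask.  DMA_32BIT_MASK is assumed
 * here purely as an example.
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;	// device unusable for DMA
 */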
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, addr, size, direction);
}
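/*
 * Usage sketch (illustrative): streaming mapping of a kernel buffer for
 * a single transfer.  "buf" and "len" are assumed to come from the
 * caller; the CPU must not touch the buffer while it is mapped unless
 * the sync helpers below are used.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	// ... hand "handle" to the device, wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */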
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_sg)
		dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
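/*
 * Usage sketch (illustrative): mapping a scatterlist.  dma_map_sg() may
 * coalesce entries, so the device must be programmed with the returned
 * count, not the original nents.  "sglist" and "nents" are assumed to
 * have been prepared by the caller (sg_init_table()/sg_set_page()).
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	// ... program sg_dma_address()/sg_dma_len() of the first "count"
 *	//     entries into the device ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */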
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}
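/*
 * Usage sketch (illustrative): a long-lived streaming mapping whose
 * ownership ping-pongs between CPU and device.  "handle" and "len" are
 * assumed to come from an earlier dma_map_single(); the CPU may only
 * look at the buffer between the for_cpu and for_device calls.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	// ... CPU reads the data the device just wrote ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	// ... device may DMA into the buffer again ...
 */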
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);

	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(dev, page_to_phys(page) + offset,
				   size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}
static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}
#define dma_is_consistent(d, h)	(1)
#ifdef CONFIG_X86_32
# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
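/*
 * Usage sketch (illustrative): a driver for a device with its own
 * on-board memory window can hand that window to the DMA API, so that
 * dma_alloc_coherent() for this device is satisfied from it.  The bus
 * and device addresses, the 64 KiB size, and the DMA_MEMORY_MAP flag
 * are made-up example values; the call returns 0 on failure.
 *
 *	if (!dma_declare_coherent_memory(dev, 0xfe000000, 0x0, 0x10000,
 *					 DMA_MEMORY_MAP))
 *		return -ENXIO;
 *	// ... dma_alloc_coherent(dev, ...) now comes from that window ...
 *	dma_release_declared_memory(dev);
 */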
#endif /* CONFIG_X86_32 */
#endif /* _ASM_DMA_MAPPING_H_ */