/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc:
 * the pci and vio busses
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
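
/*
 * Illustrative sketch only (not part of this header): a driver on a
 * non-coherent platform could use these low-level helpers directly.
 * The buffer and handle names below are hypothetical.
 *
 *	dma_addr_t handle;
 *	void *buf = __dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (buf) {
 *		... program the device with "handle", do the DMA ...
 *		__dma_free_coherent(PAGE_SIZE, buf);
 *	}
 */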

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)			((void)0)
#define __dma_sync(addr, size, rw)			((void)0)
#define __dma_sync_page(pg, off, sz, rw)		((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;

	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev,
				dma_addr_t dma_address, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
#endif
};
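
/*
 * For illustration only: a bus or platform could supply its own set of
 * operations by filling in the hooks it implements. The "example_*"
 * names below are hypothetical, not part of this header.
 *
 *	static struct dma_mapping_ops example_ops = {
 *		.alloc_coherent	= example_alloc_coherent,
 *		.free_coherent	= example_free_coherent,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *		.map_page	= example_map_page,
 *		.unmap_page	= example_unmap_page,
 *	};
 */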

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
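
/*
 * Sketch (hypothetical): bus setup code installs an operation set on
 * each device it creates, e.g.
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *
 * after which the generic wrappers below dispatch through it.
 */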

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
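
/*
 * Illustration (hypothetical driver code): negotiate the widest mask
 * the device can use, falling back to 32 bits.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */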

/*
 * map_/unmap_single actually call through to map/unmap_page, now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}
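
/*
 * Worked example (illustrative): for a hypothetical cpu_addr of
 * 0xc0001234 with 4K pages, the call above maps the page returned by
 * virt_to_page(cpu_addr) at offset 0x234 -- the two together address
 * exactly the original buffer.
 */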

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
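
/*
 * Usage sketch (hypothetical driver code): allocate a long-lived,
 * coherent descriptor ring and free it on teardown. RING_BYTES is an
 * assumed driver-local constant.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */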

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}
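
/*
 * Streaming-DMA sketch (hypothetical driver code): map a kernel buffer
 * for a single device-bound transfer, then tear the mapping down.
 * "buf" and "len" are assumed driver-local names.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... start the transfer using "addr", wait for completion ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */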

#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
					   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle,
					      0, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
					   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
					      size, direction);
}

#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif /* CONFIG_PPC_NEED_DMA_SYNC_OPS */

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
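
/*
 * Illustrative check (hypothetical driver code): always test a fresh
 * mapping before handing the address to hardware.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */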

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */