/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc,
 * for the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)			((void)0)
#define __dma_sync(addr, size, rw)			((void)0)
#define __dma_sync_page(pg, off, sz, rw)		((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

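/*
 * Illustrative sketch (not part of this header): the comment above
 * describes two ways to get DMA-consistent memory on a non-snooping
 * core.  A hypothetical driver could do either of the following;
 * "dev", "size" and the buffer names are assumed, not real kernel
 * symbols beyond the functions declared above.
 *
 *	// (a) allocate an uncached mapping up front
 *	dma_addr_t handle;
 *	void *buf = __dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *
 *	// (b) allocate "normally" and flush/invalidate around each access
 *	void *buf2 = kmalloc(size, GFP_KERNEL);
 *	__dma_sync(buf2, size, DMA_TO_DEVICE);
 *
 * With (b) the cache management must be repeated for every transfer,
 * which is what the streaming dma_map_*()/dma_sync_*() wrappers below
 * end up doing.
 */
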
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev,
				dma_addr_t dma_address, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
#endif
};

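/*
 * Illustrative sketch (not part of this header): a bus or platform
 * provides one of these tables and attaches it to its devices.  The
 * "my_bus_*" and "mydev" names below are hypothetical.
 *
 *	static struct dma_mapping_ops my_bus_dma_ops = {
 *		.alloc_coherent	= my_bus_alloc_coherent,
 *		.free_coherent	= my_bus_free_coherent,
 *		.map_sg		= my_bus_map_sg,
 *		.unmap_sg	= my_bus_unmap_sg,
 *		.map_page	= my_bus_map_page,
 *		.unmap_page	= my_bus_unmap_page,
 *	};
 *
 *	set_dma_ops(&mydev->dev, &my_bus_dma_ops);
 *
 * set_dma_ops() is defined below; the wrappers in the rest of this
 * file then dispatch through dev->archdata.dma_ops.
 */
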
/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out-of-line call but it is not needed yet. The
	 * only ISA DMA device we support is the floppy, and we have a
	 * hack in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

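/*
 * Illustrative sketch (not part of this header): a driver probe
 * routine typically asks for the widest mask it can use and falls
 * back to 32 bits.  "pdev" is hypothetical; DMA_64BIT_MASK and
 * DMA_32BIT_MASK come from linux/dma-mapping.h.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;	// no usable DMA addressing
 */
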
/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

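/*
 * Illustrative sketch (not part of this header): a descriptor ring is
 * a typical consumer of coherent memory.  "struct my_desc", "ring",
 * "RING_BYTES" and "dev" are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	struct my_desc *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... hand ring_dma to the device, use "ring" from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
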
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

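/*
 * Illustrative sketch (not part of this header): streaming mappings
 * for a scatter/gather transfer.  dma_map_sg() may coalesce entries
 * (e.g. through an IOMMU), so the device must be programmed with the
 * count it returns, while dma_unmap_sg() takes the original count.
 * "sg", "nents" and "dev" are hypothetical.
 *
 *	int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	// ... program the device with "mapped" entries ...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */
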
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
					   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle,
					      0, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
					   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
					      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

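/*
 * Illustrative sketch (not part of this header): every streaming
 * mapping should be checked with dma_mapping_error() before the
 * address is handed to the device.  "buf", "len" and "dev" are
 * hypothetical.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -EIO;
 *	// ... start the transfer ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */
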
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

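/*
 * Illustrative sketch (not part of this header): buffers mapped for
 * streaming DMA on a non-coherent core should not share a cache line
 * with unrelated data, so round object sizes up to the value returned
 * above.  "obj_size" is hypothetical; ALIGN() comes from
 * linux/kernel.h.
 *
 *	size_t aligned = ALIGN(obj_size, dma_get_cache_alignment());
 */
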
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */