/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc (the PCI and
 * VIO busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

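/*
 * Illustrative sketch (not part of the original header): the "allocate
 * the space normally" alternative mentioned above pairs an ordinary
 * allocation with explicit __dma_sync() calls around each device
 * access, e.g.:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	... fill buf ...
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	// flush before device reads
 *
 * The buffer itself stays cacheable; only the sync points make it
 * consistent with the device's view. 'buf' and 'len' are made-up
 * driver variables.
 */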
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *dev, void *ptr,
				 size_t size, enum dma_data_direction direction);
	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
			     size_t size, enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 dma_mask);
};
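/*
 * Illustrative sketch (hypothetical, not from this header): a platform
 * bus would typically provide a filled-in table like the one below and
 * hang it off dev->archdata.dma_ops; the callback names are made up:
 *
 *	struct dma_mapping_ops my_bus_dma_ops = {
 *		.alloc_coherent	= my_bus_alloc_coherent,
 *		.free_coherent	= my_bus_free_coherent,
 *		.map_single	= my_bus_map_single,
 *		.unmap_single	= my_bus_unmap_single,
 *		.map_sg		= my_bus_map_sg,
 *		.unmap_sg	= my_bus_unmap_sg,
 *	};
 *
 * .dma_supported and .set_dma_mask may be left NULL, in which case the
 * generic wrappers below fall back to default behaviour.
 */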

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
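/*
 * Illustrative usage (not from the original header): a driver probe
 * routine would typically negotiate its DMA mask before creating any
 * mapping, e.g.:
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;	// device can't address the memory we need
 */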

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
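/*
 * Illustrative usage (not from the original header): coherent memory is
 * allocated once and handed to the device by bus address, e.g.:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	... program ring_dma into the device, use ring from the CPU ...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 *
 * RING_SIZE is a made-up driver constant; no sync calls are needed on
 * coherent mappings.
 */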

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction);
}
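/*
 * Illustrative usage (not from the original header): streaming mappings
 * are paired around a single transfer, e.g. for a transmit buffer:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... point the device at 'bus' and start the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * 'buf' and 'len' are made-up driver variables; the CPU should not
 * write to the buffer while it is mapped.
 */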

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
				   direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
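/*
 * Illustrative usage (not from the original header): scatter/gather
 * mappings cover a whole list at once. The count returned by
 * dma_map_sg() (which may be smaller than nents if entries were
 * coalesced) is what gets programmed into the device, while
 * dma_unmap_sg() takes the original nents:
 *
 *	sg_init_table(sgl, nents);
 *	... point each entry at a page with sg_set_page() ...
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	... program the device with 'mapped' entries ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * 'sgl' and 'nents' are made-up driver variables.
 */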

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

extern unsigned long dma_direct_offset;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}
251 | ||
f774216d SB |
252 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, |
253 | size_t size, | |
254 | enum dma_data_direction direction) | |
255 | { | |
256 | /* We do nothing. */ | |
257 | } | |

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}
295 | ||
f774216d SB |
296 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
297 | int nhwentries, | |
298 | enum dma_data_direction direction) | |
299 | { | |
300 | /* We don't do anything here. */ | |
301 | } | |

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
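/*
 * Illustrative usage (not from the original header): when the CPU needs
 * to inspect a streaming mapping mid-transfer, ownership bounces via
 * the sync calls, e.g. for a receive buffer:
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... CPU reads the freshly DMA'd data ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 *
 * 'bus' and 'len' are made-up driver variables.
 */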

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
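/*
 * Illustrative usage (not from the original header): a streaming map
 * can fail (e.g. IOMMU space exhaustion on ppc64), so the result should
 * be checked before the address is handed to hardware:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -ENOMEM;
 */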

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
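/*
 * Illustrative usage (not from the original header): on non-coherent
 * systems a streaming DMA buffer should not share a cache line with
 * unrelated data, so allocation sizes can be padded like:
 *
 *	size_t len = ALIGN(payload_len, dma_get_cache_alignment());
 *
 * 'payload_len' is a made-up variable for the example.
 */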

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
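/*
 * Illustrative note (not from the original header): the range variants
 * take an offset into the mapping. Since the implementations above sync
 * from the start of the mapping, they conservatively cover
 * offset + size bytes rather than just the requested window, e.g.:
 *
 *	// sync only the header of a mapped packet
 *	dma_sync_single_range_for_cpu(dev, bus, hdr_off, HDR_LEN,
 *				      DMA_FROM_DEVICE);
 *
 * 'bus', 'hdr_off' and HDR_LEN are made up for the example.
 */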
394 | ||
d3fa72e4 | 395 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
78b09735 | 396 | enum dma_data_direction direction) |
1da177e4 | 397 | { |
78b09735 | 398 | BUG_ON(direction == DMA_NONE); |
1da177e4 LT |
399 | __dma_sync(vaddr, size, (int)direction); |
400 | } | |
401 | ||
#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */