#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma

#if !defined(CONFIG_HIGHMEM)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
#else
#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
#endif

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
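
/*
 * Illustrative sketch (not part of the original header): a machine class
 * whose bus addresses differ from physical addresses by a constant could
 * provide the __arch_* overrides from its mach/memory.h along these
 * lines.  EXAMPLE_BUS_OFFSET is a hypothetical platform constant.
 *
 *	#define __arch_page_to_dma(dev, page)	\
 *		((dma_addr_t)(page_to_phys(page) + EXAMPLE_BUS_OFFSET))
 *	#define __arch_dma_to_virt(dev, addr)	\
 *		(phys_to_virt((addr) - EXAMPLE_BUS_OFFSET))
 *	#define __arch_virt_to_dma(dev, addr)	\
 *		((dma_addr_t)(virt_to_phys(addr) + EXAMPLE_BUS_OFFSET))
 */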

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered, mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h.
 *
 * Note: Drivers should NOT use these functions directly, as doing so will
 * break platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
				 size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
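
/*
 * Illustrative sketch (not part of the original header): a driver for a
 * device that can only drive the low 24 address bits would set its mask
 * at probe time.  "pdev" stands for some hypothetical platform device.
 *
 *	if (dma_set_mask(&pdev->dev, 0x00ffffff))
 *		dev_warn(&pdev->dev, "no suitable DMA mask available\n");
 */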

static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
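
/*
 * Illustrative sketch (not part of the original header): since a failed
 * mapping is signalled through the returned address, callers should test
 * the handle before giving it to the device.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */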

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal once this call has started executing.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
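
/*
 * Illustrative sketch (not part of the original header): a typical
 * coherent allocation for a one-page descriptor ring, with the matching
 * free.  The names "ring" and "ring_dma" are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */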

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
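
/*
 * Illustrative sketch (not part of the original header): a character
 * driver exporting its coherent buffer through its mmap file operation.
 * "struct foo_dev" and its fields are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->handle, foo->size);
 *	}
 */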

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
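
/*
 * Illustrative sketch (not part of the original header): LCD controller
 * drivers commonly use writecombining memory for their framebuffer so
 * that CPU writes stream efficiently.  "fb_size" is hypothetical.
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, fb_size, fb, fb_dma);
 */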

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
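
/*
 * Illustrative sketch (not part of the original header): a platform with
 * a limited inbound PCI window might implement dma_needs_bounce and
 * register its devices roughly as follows.  EXAMPLE_WINDOW_SIZE is a
 * hypothetical platform constant.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > EXAMPLE_WINDOW_SIZE;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, 2048, 4096);
 */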

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
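
/*
 * Illustrative sketch (not part of the original header): a typical
 * transmit path maps a kmalloc'd buffer, lets the device read from it,
 * then unmaps it once the transfer has completed.  "start_hw_tx" is a
 * hypothetical device-specific function.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	start_hw_tx(handle, len);
 *	...					// wait for completion IRQ
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */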

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint_page(page, offset, size, dir);

	return page_to_dma(dev, page) + offset;
}
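
/*
 * Illustrative sketch (not part of the original header): mapping a
 * fragment that lives at some offset within a page, as a network driver
 * might do for a paged packet fragment.  "frag_page", "frag_off" and
 * "frag_len" are hypothetical.
 *
 *	dma_addr_t handle = dma_map_page(dev, frag_page, frag_off,
 *					 frag_len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, handle, frag_len, DMA_TO_DEVICE);
 */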

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a single streaming mode DMA translation of a page.  The handle
 * and size must match what was provided in the previous dma_map_page()
 * call.  All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI DMA
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI DMA address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
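
/*
 * Illustrative sketch (not part of the original header): peeking at a
 * device-written buffer between transfers without unmapping it.  The
 * status-word layout is hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	status = *(u32 *)buf;			// CPU owns the buffer here
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	...					// device owns it again
 */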

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
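
/*
 * Illustrative sketch (not part of the original header): mapping a small
 * two-buffer scatter list.  dma_map_sg() may return fewer entries than
 * were passed in, so the returned count - not the original nents - must
 * be used when walking the mapped list.  "setup_hw_entry" is a
 * hypothetical device-specific function.
 *
 *	struct scatterlist sg[2];
 *	int i, count;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *
 *	count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++)
 *		setup_hw_entry(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *	...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */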

#endif /* __KERNEL__ */
#endif