Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _ASM_IA64_DMA_MAPPING_H |
2 | #define _ASM_IA64_DMA_MAPPING_H | |
3 | ||
4 | /* | |
5 | * Copyright (C) 2003-2004 Hewlett-Packard Co | |
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | |
7 | */ | |
1da177e4 | 8 | #include <asm/machvec.h> |
9b6eccfc | 9 | #include <linux/scatterlist.h> |
1da177e4 LT |
10 | |
11 | #define dma_alloc_coherent platform_dma_alloc_coherent | |
b7de8e7e RD |
12 | /* coherent mem. is cheap */ |
13 | static inline void * | |
14 | dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |
15 | gfp_t flag) | |
16 | { | |
17 | return dma_alloc_coherent(dev, size, dma_handle, flag); | |
18 | } | |
1da177e4 | 19 | #define dma_free_coherent platform_dma_free_coherent |
b7de8e7e RD |
20 | static inline void |
21 | dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, | |
22 | dma_addr_t dma_handle) | |
23 | { | |
24 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | |
25 | } | |
/*
 * Every DMA mapping primitive is dispatched through the machine vector
 * (<asm/machvec.h>), letting each ia64 platform supply its own
 * implementation behind the generic names.
 */
#define dma_map_single		platform_dma_map_single
#define dma_map_sg		platform_dma_map_sg
#define dma_unmap_single	platform_dma_unmap_single
#define dma_unmap_sg		platform_dma_unmap_sg
#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
#define dma_mapping_error	platform_dma_mapping_error

/*
 * Page mappings are built on the single-buffer primitives via
 * page_address() — assumes the page has a kernel direct mapping
 * (no highmem), which holds on ia64.
 */
#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)
40 | ||
41 | /* | |
42 | * Rest of this file is part of the "Advanced DMA API". Use at your own risk. | |
43 | * See Documentation/DMA-API.txt for details. | |
44 | */ | |
45 | ||
46 | #define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \ | |
47 | dma_sync_single_for_cpu(dev, dma_handle, size, dir) | |
48 | #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ | |
49 | dma_sync_single_for_device(dev, dma_handle, size, dir) | |
50 | ||
51 | #define dma_supported platform_dma_supported | |
52 | ||
53 | static inline int | |
54 | dma_set_mask (struct device *dev, u64 mask) | |
55 | { | |
56 | if (!dev->dma_mask || !dma_supported(dev, mask)) | |
57 | return -EIO; | |
58 | *dev->dma_mask = mask; | |
59 | return 0; | |
60 | } | |
61 | ||
/* DMA cache alignment — defined out of line, presumably per-platform. */
extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}
74 | ||
/* Coherent memory is the only kind ia64 hands out, so always "consistent". */
#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */