Commit | Line | Data |
---|---|---|
59c61138 MS |
1 | /* DMA mapping. Nothing tricky here, just virt_to_phys */ |
2 | ||
1da177e4 LT |
3 | #ifndef _ASM_CRIS_DMA_MAPPING_H |
4 | #define _ASM_CRIS_DMA_MAPPING_H | |
5 | ||
59c61138 MS |
6 | #include <linux/mm.h> |
7 | #include <linux/kernel.h> | |
1da177e4 | 8 | |
59c61138 MS |
9 | #include <asm/cache.h> |
10 | #include <asm/io.h> | |
11 | #include <asm/scatterlist.h> | |
1da177e4 | 12 | |
/*
 * This architecture draws no distinction between coherent and
 * non-coherent DMA memory, so the noncoherent calls simply alias the
 * coherent ones.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_PCI
/*
 * With PCI enabled, real allocators are provided out of line (in the
 * arch PCI support code).  Returns a CPU virtual address and fills in
 * *dma_handle with the matching bus address.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
#else
/*
 * Without PCI there are no DMA-capable buses wired up, so any call is
 * a driver bug: trap hard rather than hand back a bogus mapping.
 */
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG();
	return NULL;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG();
}
#endif
/*
 * Map a single buffer for DMA.  DMA addresses are identical to
 * physical addresses here (see the file header comment: "just
 * virt_to_phys"), so no IOMMU or bounce-buffer work is needed.
 * DMA_NONE is never a valid direction for a mapping call.
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return virt_to_phys(ptr);
}
45 | ||
/*
 * Tear down a dma_map_single() mapping.  Nothing to undo on this
 * 1:1-mapped architecture; only the direction sanity check remains.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
52 | ||
53 | static inline int | |
54 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
55 | enum dma_data_direction direction) | |
56 | { | |
57 | printk("Map sg\n"); | |
58 | return nents; | |
1da177e4 LT |
59 | } |
60 | ||
/*
 * Map one page (plus offset) for DMA.  The bus address is simply the
 * page's physical address plus the offset — no translation layer.
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}
68 | ||
/*
 * Tear down a dma_map_page() mapping.  No bookkeeping to release;
 * only validate the direction argument.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
75 | ||
1da177e4 LT |
76 | |
/*
 * Tear down a dma_map_sg() mapping.  Nothing was allocated or
 * translated at map time, so there is nothing to undo.
 */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
83 | ||
/*
 * Hand a mapped buffer back to the CPU.  Empty body: no cache
 * maintenance appears to be required on this arch (presumed from the
 * identity mapping noted in the file header — confirm against the
 * arch's cache behavior).
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}
89 | ||
/*
 * Hand a mapped buffer to the device.  No-op, matching
 * dma_sync_single_for_cpu() above.
 */
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
}
95 | ||
/*
 * Sub-range variant of dma_sync_single_for_cpu().  No-op for the same
 * reason: no cache maintenance is done in any sync hook here.
 */
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}
1da177e4 | 102 | |
59c61138 MS |
/*
 * Sub-range variant of dma_sync_single_for_device().  No-op.
 */
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
}
1da177e4 | 109 | |
59c61138 MS |
/*
 * Scatterlist variant of dma_sync_single_for_cpu().  No-op.
 */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}
115 | ||
/*
 * Scatterlist variant of dma_sync_single_for_device().  No-op.
 */
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
}
121 | ||
59c61138 MS |
/*
 * Mapping-failure check.  Mappings here are plain virt_to_phys and
 * cannot fail, so every handle is reported as valid.
 */
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}
127 | ||
128 | static inline int | |
129 | dma_supported(struct device *dev, u64 mask) | |
130 | { | |
131 | /* | |
132 | * we fall back to GFP_DMA when the mask isn't all 1s, | |
133 | * so we can't guarantee allocations that must be | |
134 | * within a tighter range than GFP_DMA.. | |
135 | */ | |
136 | if(mask < 0x00ffffff) | |
137 | return 0; | |
138 | ||
139 | return 1; | |
140 | } | |
141 | ||
142 | static inline int | |
143 | dma_set_mask(struct device *dev, u64 mask) | |
144 | { | |
145 | if(!dev->dma_mask || !dma_supported(dev, mask)) | |
146 | return -EIO; | |
147 | ||
148 | *dev->dma_mask = mask; | |
149 | ||
150 | return 0; | |
151 | } | |
152 | ||
/*
 * Smallest safe alignment/size for a coherent DMA buffer: one cache
 * line of the widest cache in the system (INTERNODE_CACHE_SHIFT from
 * <asm/cache.h>).
 */
static inline int
dma_get_cache_alignment(void)
{
	return (1 << INTERNODE_CACHE_SHIFT);
}
158 | ||
/* All DMA memory is consistent on this architecture; always true. */
#define dma_is_consistent(d, h) (1)
59c61138 | 160 | |
1da177e4 | 161 | static inline void |
d3fa72e4 | 162 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
1da177e4 LT |
163 | enum dma_data_direction direction) |
164 | { | |
1da177e4 LT |
165 | } |
166 | ||
59c61138 MS |
167 | |
168 | #endif |