/*
 * This is based on both include/asm-sh/dma-mapping.h and
 * include/asm-ppc/pci.h
 */
#ifndef __ASM_PPC_DMA_MAPPING_H
#define __ASM_PPC_DMA_MAPPING_H

#include <linux/config.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
                            size_t size, int direction);

#define dma_cache_inv(_start, _size) \
        invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start, _size) \
        clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start, _size) \
        flush_dcache_range(_start, (_start + _size))
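
/*
 * These three mirror the classic cache-maintenance split:
 * dma_cache_inv() discards (invalidates) stale lines before the CPU
 * reads data a device has written, dma_cache_wback() pushes dirty
 * lines out to memory before a device reads, and dma_cache_wback_inv()
 * does both for bidirectional buffers.
 */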

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define dma_cache_inv(_start, _size)            do { } while (0)
#define dma_cache_wback(_start, _size)          do { } while (0)
#define dma_cache_wback_inv(_start, _size)      do { } while (0)

#define __dma_alloc_coherent(gfp, size, handle) NULL
#define __dma_free_coherent(size, addr)         do { } while (0)
#define __dma_sync(addr, size, rw)              do { } while (0)
#define __dma_sync_page(pg, off, sz, rw)        do { } while (0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
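
/*
 * On cache-coherent cores the __dma_* helpers above compile away, so
 * the generic routines below reduce to plain address arithmetic; on
 * non-coherent cores the same routines perform explicit cache
 * maintenance through the out-of-line helpers.
 */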

#define dma_supported(dev, mask)        (1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
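
/*
 * Typical caller (an illustrative sketch only; "pdev" is a hypothetical
 * device, not part of this header):
 *
 *      if (dma_set_mask(&pdev->dev, 0xffffffffULL))
 *              return -EIO;
 *
 * Since dma_supported() unconditionally returns 1 on this platform,
 * this can only fail for a device with no dma_mask pointer at all.
 */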

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, int gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        return __dma_alloc_coherent(size, dma_handle, gfp);
#else
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* devices that cannot address all of RAM must allocate from ZONE_DMA */
        if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
                gfp |= GFP_DMA;

        ret = (void *)__get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }

        return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
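
/*
 * Illustrative pairing (a sketch; "pdev" and RING_BYTES are
 * hypothetical names, not part of this header):
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *                                      &ring_dma, GFP_KERNEL);
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * The CPU uses "ring" while the device is handed "ring_dma"; memory
 * obtained this way needs no further sync calls.
 */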

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync(ptr, size, direction);

        return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)  do { } while (0)
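
/*
 * Streaming-mapping sketch (illustrative; "buf" and "len" are
 * hypothetical):
 *
 *      dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *                                         DMA_TO_DEVICE);
 *      ... hand "handle" to the device and wait for completion ...
 *      dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 *
 * On a non-coherent core the map step performs the cache maintenance;
 * unmap is a no-op because the bus address translates 1:1 back to the
 * kernel virtual address.
 */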

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync_page(page, offset, size, direction);

        /* open-coded page_to_phys(), shifted into PCI bus space */
        return (page - mem_map) * PAGE_SIZE + PCI_DRAM_OFFSET + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)  do { } while (0)

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                BUG_ON(!sg->page);
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
                sg->dma_address = page_to_bus(sg->page) + sg->offset;
        }

        return nents;
}

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)       do { } while (0)
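
/*
 * No IOMMU is involved: each entry is translated 1:1, entries are never
 * merged, and the returned count therefore always equals nents.
 */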

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                        size_t size,
                        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size,
                           enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
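
/*
 * Reusing a streaming mapping between CPU and device (illustrative):
 *
 *      dma_sync_single_for_cpu(&pdev->dev, handle, len, DMA_FROM_DEVICE);
 *      ... CPU inspects the freshly received data ...
 *      dma_sync_single_for_device(&pdev->dev, handle, len,
 *                                 DMA_FROM_DEVICE);
 *      ... device may write into the buffer again ...
 *
 * Both directions of ownership transfer funnel into the same
 * __dma_sync() call here; the out-of-line implementation chooses
 * invalidate versus writeback from the direction argument.
 */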

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                    enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++)
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                       enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++)
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

#define dma_alloc_noncoherent(d, s, h, f)       dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)        dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d)    (0)
#else
#define dma_is_consistent(d)    (1)
#endif

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family defines its own L1_CACHE_SHIFT, and
         * L1_CACHE_BYTES is derived from it, so this is always safe.
         */
        return L1_CACHE_BYTES;
}
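
/*
 * Illustrative use of the alignment (RX_LEN is hypothetical): on a
 * non-coherent core, a buffer handed to the streaming API should not
 * share a cache line with unrelated data,
 *
 *      char rx_buf[RX_LEN] __attribute__((aligned(L1_CACHE_BYTES)));
 *
 * since invalidating a shared line would also discard neighbouring
 * CPU writes.
 */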

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        /* for now, sync from the start of the mapping through the range */
        dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        /* for now, sync from the start of the mapping through the range */
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(void *vaddr, size_t size,
                                  enum dma_data_direction direction)
{
        __dma_sync(vaddr, size, (int)direction);
}

/* Mappings here are simple 1:1 translations and can never fail. */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

#endif /* __ASM_PPC_DMA_MAPPING_H */