/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/ip32/crime.h>

/*
 * A warning on terminology: Linux calls an uncached area "coherent",
 * while MIPS terminology reserves "coherent" for memory areas whose
 * coherency is maintained by hardware.
 */

/*
 * A few notes on the IP32 memory map:
 * 1. The CPU sees memory as two chunks: 256MB at physical address 0x0,
 *    and the rest at 0x40000000 + 256MB.
 * 2. PCI sees memory as one big chunk at 0x0 (or we could use 0x40000000
 *    for native-endian).
 * 3. All other devices see memory as one big chunk at 0x40000000.
 * 4. Non-PCI devices pass NULL as their struct device *.
 * Thus we translate addresses differently depending on the device;
 * the illustrative sketch below summarizes the translation.
 */

#define RAM_OFFSET_MASK	0x3fffffff

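/*
 * Illustrative sketch only (not part of the original code, hence the
 * "#if 0"): the physical-to-bus translation described above, written
 * out as a helper. The name crime_phys_to_dma() is hypothetical; the
 * mapping functions below open-code these same two steps.
 */
#if 0
static inline dma_addr_t crime_phys_to_dma(struct device *dev,
	unsigned long paddr)
{
	/* Offset within the 1GB RAM window. */
	unsigned long addr = paddr & RAM_OFFSET_MASK;

	/* Non-PCI devices (dev == NULL) see RAM rebased at CRIME_HI_MEM_BASE. */
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;
	return (dma_addr_t)addr;
}
#endif
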
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		unsigned long addr = virt_to_phys(ret) & RAM_OFFSET_MASK;

		memset(ret, 0, size);
		if (dev == NULL)
			addr += CRIME_HI_MEM_BASE;
		*dma_handle = addr;
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

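/*
 * Direction-dependent cache maintenance for a buffer at a cached CPU
 * address: write back dirty lines before the device reads the buffer,
 * invalidate stale lines before the CPU reads it, or both for
 * bidirectional transfers.
 */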
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	/* Same direction-dependent maintenance as __dma_sync() above. */
	__dma_sync(addr, size, direction);

	addr = virt_to_phys(ptr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;
	return (dma_addr_t)addr;
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	/* Nothing to do here beyond catching an invalid direction. */
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page) + sg->offset;
		if (addr)
			__dma_sync(addr, sg->length, direction);
		addr = __pa(addr) & RAM_OFFSET_MASK;
		if (dev == NULL)
			addr += CRIME_HI_MEM_BASE;
		sg->dma_address = (dma_addr_t)addr;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);
	addr = __pa(addr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;

	return (dma_addr_t)addr;
}

EXPORT_SYMBOL(dma_map_page);

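/*
 * Translate the DMA handle back to a cached CPU address (undoing the
 * translation performed by dma_map_page() above) so that the affected
 * cache lines can be written back and invalidated.
 */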
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		dma_address &= RAM_OFFSET_MASK;
		addr = dma_address + PAGE_OFFSET;
		if (dma_address >= 256*1024*1024)
			addr += CRIME_HI_MEM_BASE;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (!addr)
			continue;
		dma_cache_wback_inv(addr + sg->offset, sg->length);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

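/*
 * The dma_sync_* routines below translate the DMA handle back to a
 * cached CPU address, just as dma_unmap_page() does, and then apply
 * the direction-dependent cache maintenance via __dma_sync().
 */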
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + offset + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + offset + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);