/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

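/*
 * Turn a DMA address back into the kernel virtual address of the buffer
 * it maps.  This relies on plat_dma_addr_to_phys() undoing the platform's
 * address translation and on the buffer living in directly mapped low
 * memory, which holds for everything mapped in this file.
 */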
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
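
/*
 * Concretely, on a typical 32-bit MIPS system the same page of RAM is
 * visible both through the cached KSEG0 window and through the uncached
 * KSEG1 window; UNCAC_ADDR() and CAC_ADDR() convert between the two
 * views.  "Coherent" allocations below simply hand out the KSEG1 alias.
 */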
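
/*
 * The R10000 and R12000 execute loads speculatively, so cache lines may
 * be refilled behind the CPU's back even while a buffer is owned by a
 * device.  Such CPUs therefore need cache maintenance when a buffer is
 * handed back to the CPU, not only when it is handed to the device.
 */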
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

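/*
 * Allocate zeroed pages for DMA and return a *cached* kernel address;
 * callers are expected to keep the CPU's and the device's view of the
 * buffer in step themselves, e.g. with dma_cache_sync().
 */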
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

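/*
 * Allocate zeroed pages the device can access without explicit syncs.
 * On platforms without hardware coherency this means flushing the
 * initial CPU writes and then returning the uncached (KSEG1) alias of
 * the buffer instead of the cached address.
 */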
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

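/*
 * Counterpart to dma_alloc_noncoherent().  The handle needs no
 * unmapping here; the pages are simply returned to the page allocator.
 */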
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

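/*
 * Counterpart to dma_alloc_coherent().  On noncoherent platforms the
 * caller holds the uncached KSEG1 alias, which must be converted back
 * to the cached address before the pages can be freed.
 */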
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

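/*
 * Perform the cache maintenance a transfer direction requires:
 * writeback before the device reads (DMA_TO_DEVICE), invalidate before
 * the CPU reads what the device wrote (DMA_FROM_DEVICE), and both for
 * DMA_BIDIRECTIONAL.
 */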
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

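/*
 * Map a single buffer for streaming DMA.  A typical call sequence from
 * a driver (illustrative only) looks like:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... point the device at "handle" and start the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */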
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

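/*
 * Map each entry of a scatter/gather list.  Each entry gets the same
 * treatment as dma_map_single(); the number of entries mapped is
 * returned so the caller can hand it to dma_unmap_sg() later.
 */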
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

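/*
 * Map part of a page for streaming DMA.  Note that the cache writeback
 * here goes through page_address(), so this assumes the page sits in
 * directly mapped low memory.
 */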
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

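/*
 * Tear down a dma_map_page() mapping.  For DMA_TO_DEVICE transfers the
 * CPU never reads data the device produced, so no cache maintenance is
 * needed on unmap.
 */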
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		/*
		 * The cache operations want a kernel virtual address,
		 * not the bare physical address that
		 * plat_dma_addr_to_phys() yields.
		 */
		addr = dma_addr_to_virt(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

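/*
 * Tear down a dma_map_sg() mapping, syncing each entry back to the CPU
 * first where the hardware doesn't keep the caches coherent.
 */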
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

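/*
 * The dma_sync_*_for_cpu() and dma_sync_*_for_device() calls pass
 * ownership of a streaming mapping back and forth without unmapping it.
 * A driver reusing one receive buffer might, for instance, do:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... inspect the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * Only speculating CPUs (see cpu_is_noncoherent_r10000() above) need
 * cache work on the for_cpu side; every noncoherent platform needs it
 * on the for_device side.
 */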
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

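/*
 * Mappings produced by this file cannot fail, so there is never an
 * error to report.
 */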
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

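/*
 * Explicit sync for memory obtained from dma_alloc_noncoherent().  The
 * direction argument is currently ignored beyond the sanity check: any
 * sync does a full writeback-invalidate.
 */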
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);