arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * A note on terminology: Linux calls an uncached memory area "coherent",
 * while MIPS terminology reserves "coherent" for memory areas whose
 * coherency is maintained by hardware.
 */

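/*
 * The R10000 and R12000 are singled out below because they execute loads
 * speculatively, which may pull lines back into the cache after the
 * map-time cache maintenance has run.  The unmap and sync_for_cpu paths
 * further down therefore redo the maintenance on these cores before the
 * CPU touches the buffer again.
 */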
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	        current_cpu_type() == CPU_R12000);
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

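/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): this is roughly how a driver would pair the coherent allocators
 * above.  The device pointer "pdev" and the buffer size are hypothetical.
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (buf) {
 *		... hand "handle" to the device; the CPU accesses "buf" ...
 *		dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
 *	}
 */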
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
		           direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

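/*
 * A minimal streaming-DMA sketch (illustrative only, not part of the
 * original file), assuming a hypothetical device "pdev" and a buffer
 * "buf" of "len" bytes that the CPU has filled for transmission:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	... start the device transfer using "handle" ...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 *
 * dma_mapping_error() is defined further down in this file and always
 * reports success here.
 */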
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
		                                   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

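/*
 * A minimal scatter-gather sketch (illustrative only, not part of the
 * original file); sg_init_table()/sg_set_buf() are assumed from
 * <linux/scatterlist.h>, and "pdev", the two buffers and their lengths
 * are hypothetical:
 *
 *	struct scatterlist sg[2];
 *	int count;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *
 *	count = dma_map_sg(&pdev->dev, sg, 2, DMA_TO_DEVICE);
 *	... program the device with sg[i].dma_address / sg_dma_len() ...
 *	dma_unmap_sg(&pdev->dev, sg, 2, DMA_TO_DEVICE);
 */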
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

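/*
 * Ownership note with a minimal sketch (illustrative only, not part of
 * the original file): a long-lived streaming mapping must be handed back
 * to the CPU before the CPU reads it, and back to the device before the
 * next transfer.  "pdev", "handle" and "len" are hypothetical:
 *
 *	dma_sync_single_for_cpu(&pdev->dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU examines the freshly DMA'd data ...
 *	dma_sync_single_for_device(&pdev->dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may write into the buffer again ...
 */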
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);