/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory, which for us just
 * means uncached.
 */
void *dma_alloc_coherent(struct device *dev,
                         size_t size,
                         dma_addr_t *dma_handle,
                         gfp_t gfp)
{
        u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
        int node = dev_to_node(dev);
        int order = get_order(size);
        struct page *pg;
        dma_addr_t addr;

        gfp |= __GFP_ZERO;

        /*
         * By forcing NUMA node 0 for 32-bit masks we ensure that the
         * high 32 bits of the resulting PA will be zero.  If the mask
         * size is, e.g., 24, we may still not be able to guarantee a
         * suitable memory address, in which case we will return NULL.
         * But such devices are uncommon.
         */
        if (dma_mask <= DMA_BIT_MASK(32))
                node = 0;

        pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED);
        if (pg == NULL)
                return NULL;

        addr = page_to_phys(pg);
        if (addr + size > dma_mask) {
                /* Free by kernel VA, as dma_free_coherent() does below. */
                homecache_free_pages((unsigned long)page_address(pg), order);
                return NULL;
        }

        *dma_handle = addr;
        return page_address(pg);
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory that was allocated with dma_alloc_coherent.
 */
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
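
/*
 * Illustrative sketch, not part of the original file: how a driver would
 * typically use the two routines above.  The function names and buffer
 * size are hypothetical; a real driver gets "dev" from its bus layer
 * (e.g. &pdev->dev).
 */
static void * __maybe_unused example_alloc_ring(struct device *dev,
                                                dma_addr_t *bus_addr)
{
        /* One page of uncached, zeroed memory shared with the device. */
        void *ring = dma_alloc_coherent(dev, PAGE_SIZE, bus_addr, GFP_KERNEL);

        /* The CPU uses "ring"; the device is programmed with "*bus_addr". */
        return ring;
}

static void __maybe_unused example_free_ring(struct device *dev, void *ring,
                                             dma_addr_t bus_addr)
{
        if (ring)
                dma_free_coherent(dev, PAGE_SIZE, ring, bus_addr);
}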

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping, we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */

/* Flush a PA range from cache page by page. */
static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
{
        struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
        size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));

        while ((ssize_t)size > 0) {
                /* Flush the page. */
                homecache_flush_cache(page++, 0);

                /* Figure out if we need to continue on the next page. */
                size -= bytesleft;
                bytesleft = PAGE_SIZE;
        }
}

/*
 * dma_map_single can be passed any memory address, and there appear
 * to be no alignment constraints.
 *
 * There is a chance that the start of the buffer will share a cache
 * line with some other data that has been touched in the meantime.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                          enum dma_data_direction direction)
{
        dma_addr_t dma_addr = __pa(ptr);

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(size == 0);

        __dma_map_pa_range(dma_addr, size);

        return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                      enum dma_data_direction direction)
{
        /* Nothing to do: the cache was already flushed at map time. */
        BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);
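
/*
 * Illustrative sketch, not part of the original file: the streaming-DMA
 * lifecycle for a single buffer.  The point where the hardware is
 * actually programmed is elided; "buf" and "len" are hypothetical.
 */
static void __maybe_unused example_stream_to_device(struct device *dev,
                                                    void *buf, size_t len)
{
        /* Flush the buffer out of the cache and obtain its bus address. */
        dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* ... program the hardware with "bus"/"len" and wait for it ... */

        /* Must be called before the CPU touches the buffer again. */
        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}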

int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
               enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));

        WARN_ON(nents == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_map_pa_range(sg->dma_address, sg->length);
        }

        return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
                  enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);
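
/*
 * Illustrative sketch, not part of the original file: mapping a
 * scatterlist for a receive operation.  Building the list and driving
 * the hardware are elided; the descriptor printout is purely for
 * illustration.
 */
static void __maybe_unused example_map_rx_sg(struct device *dev,
                                             struct scatterlist *sgl,
                                             int nents)
{
        struct scatterlist *sg;
        int count, i;

        /* Flushes each entry and fills in sg->dma_address. */
        count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

        for_each_sg(sgl, sg, count, i)
                pr_debug("desc %d: addr %#llx len %u\n", i,
                         (unsigned long long)sg_dma_address(sg),
                         sg_dma_len(sg));

        /* ... run the DMA ...; then release the mapping. */
        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
}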

dma_addr_t dma_map_page(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));

        BUG_ON(offset + size > PAGE_SIZE);
        homecache_flush_cache(page, 0);

        return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
                    enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);
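
/*
 * Illustrative sketch, not part of the original file: mapping part of a
 * page.  Note the constraint above: offset plus size must stay within a
 * single page.  The offset of 64 bytes, "pg" and "len" are hypothetical.
 */
static void __maybe_unused example_map_partial_page(struct device *dev,
                                                    struct page *pg,
                                                    unsigned int len)
{
        /* Map "len" bytes starting at offset 64 within the page. */
        dma_addr_t bus = dma_map_page(dev, pg, 64, len, DMA_TO_DEVICE);

        /* ... program the hardware with "bus" and wait for it ... */

        dma_unmap_page(dev, bus, len, DMA_TO_DEVICE);
}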

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                             size_t size, enum dma_data_direction direction)
{
        /* Nothing to do; see the comment above the map routines. */
        BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                size_t size, enum dma_data_direction direction)
{
        unsigned long start = PFN_DOWN(dma_handle);
        unsigned long end = PFN_DOWN(dma_handle + size - 1);
        unsigned long i;

        BUG_ON(!valid_dma_direction(direction));
        for (i = start; i <= end; ++i)
                homecache_flush_cache(pfn_to_page(i), 0);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
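
/*
 * Illustrative sketch, not part of the original file: re-using a
 * long-lived streaming mapping.  The CPU may only inspect the buffer
 * between the _for_cpu and _for_device calls; "bus", "buf" and "len"
 * are hypothetical.
 */
static void __maybe_unused example_recycle_rx_buffer(struct device *dev,
                                                     dma_addr_t bus,
                                                     void *buf, size_t len)
{
        /* Hand ownership back to the CPU so it can look at the data. */
        dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);

        /* ... examine or copy the contents of "buf" here ... */

        /* Flush any CPU writes and give the buffer back to the device. */
        dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
}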

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                         enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/*
 * Flush and invalidate cache for scatterlist.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_device(dev, sg->dma_address,
                                           sg_dma_len(sg), direction);
        }
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction direction)
{
        dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
                                      dma_addr_t dma_handle,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction direction)
{
        dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);