/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define CONSISTENT_BASE		(0xffc00000)
#define CONSISTENT_END		(0xffe00000)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

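/*
 * Sizing note (illustrative): CONSISTENT_END - CONSISTENT_BASE =
 * 0xffe00000 - 0xffc00000 = 2MB.  With 4KB pages that is
 * 2MB / 4KB = 512 PTE slots, which is exactly the span covered by the
 * single kernel page table allocated in consistent_init() below.
 */
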
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

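/*
 * First-fit walk, worked example (illustrative): with regions
 * [BASE, BASE+0x2000) and [BASE+0x4000, BASE+0x6000) already on the
 * list, a 0x2000-byte request starts at addr = BASE, overlaps the
 * first region, advances to addr = BASE+0x2000, now fits before the
 * second region's vm_start, and is inserted as
 * [BASE+0x2000, BASE+0x4000).  The list is kept sorted by address,
 * which is why a single forward scan suffices.
 */
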
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#x mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		dmac_flush_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
		struct page *end = page + (1 << order);

		c->vm_pages = page;

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			set_page_count(page, 1);
			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte(pte, mk_pte(page, prot));
			page++;
			pte++;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			set_page_count(page, 1);
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
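
/*
 * Usage sketch (illustrative, not part of this file): a driver would
 * typically allocate a descriptor ring with dma_alloc_coherent() at
 * probe time and hand the returned bus address to the device.  The
 * names foo_dev, ring, ring_dma and RING_BASE are hypothetical:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&foo_dev->dev, PAGE_SIZE,
 *				  &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, foo_dev->regs + RING_BASE);
 *
 * The CPU then accesses the buffer through the uncached mapping set
 * up by __dma_alloc(), so no explicit cache maintenance is needed.
 */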

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			vma->vm_flags |= VM_RESERVED;
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
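
/*
 * Usage sketch (illustrative, not part of this file): a driver's
 * mmap file operation can export an already-allocated coherent
 * buffer to userspace.  struct foo and its fields are hypothetical;
 * cpu_addr/dma_addr are the values dma_alloc_coherent() returned:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_addr, foo->size);
 *	}
 *
 * dma_mmap() above checks vma->vm_pgoff and the requested length
 * against the kernel-side region before calling remap_pfn_range().
 */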

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	WARN_ON(irqs_disabled());

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);
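
/*
 * Usage sketch (illustrative, not part of this file): the free must
 * mirror the allocation - same device, same size, and both addresses
 * returned by dma_alloc_coherent().  Because of the
 * WARN_ON(irqs_disabled()) above, call it from process context with
 * interrupts enabled, e.g. in a driver's remove path:
 *
 *	dma_free_coherent(&foo_dev->dev, PAGE_SIZE, ring, ring_dma);
 *
 * (foo_dev, ring and ring_dma are the hypothetical names from the
 * sketch following dma_alloc_coherent() above.)
 */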

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(consistent_sync);
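
/*
 * Usage sketch (illustrative, not part of this file): consistent_sync()
 * is the cache maintenance primitive behind ARM's streaming DMA
 * operations; drivers normally reach it via dma_map_single() and
 * friends rather than calling it directly.  For a buffer the device
 * will write into (buf and len are hypothetical):
 *
 *	consistent_sync(buf, len, DMA_FROM_DEVICE);
 *
 * invalidates stale cache lines before the transfer; a buffer the CPU
 * has filled for the device is cleaned (written back) instead with
 * DMA_TO_DEVICE.
 */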