/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END		(0xffe00000)
#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES	(CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

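/*
 * CONSISTENT_OFFSET() gives the page index of an address within the
 * consistent region; CONSISTENT_PTE_INDEX() gives the index of the 2MiB
 * PTE table covering it.  Callers mask the offset with (PTRS_PER_PTE - 1)
 * to get the entry within that table.
 */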

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent
 * allocations.
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

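/*
 * Allocate an address range from [head->vm_start, head->vm_end) using a
 * first-fit scan of the address-ordered region list.  The new vm_region
 * is kmalloc'd before consistent_lock is taken, so 'gfp' may sleep.
 * Returns NULL if the kmalloc() fails or no gap of 'size' bytes is free.
 */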
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

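/*
 * Find the active region starting at 'addr'.  The caller must hold
 * consistent_lock (or otherwise guarantee the list cannot change).
 */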
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
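	/*
	 * (mask + 1) & ~mask isolates the span of address space the mask
	 * can reach: for a power-of-two style mask (2^n - 1) it yields 2^n,
	 * and it wraps to 0 ("no limit") when the mask covers all 64 bits.
	 * An allocation of at least 'limit' bytes could never sit entirely
	 * below the mask, so it is rejected.
	 */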
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#x mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		dmac_flush_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		struct page *end = page + (1 << order);
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

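		/*
		 * Map each page with the caller's uncached/writecombining
		 * 'prot'.  'off' indexes into the current 2MiB PTE table
		 * and wraps into the next consistent_pte[] table when it
		 * reaches PTRS_PER_PTE.
		 */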
		do {
			BUG_ON(!pte_none(*pte));

			set_page_count(page, 1);
			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte(pte, mk_pte(page, prot));
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages: alloc_pages() handed us
		 * a whole power-of-two block, so return the tail beyond
		 * 'size' to the page allocator.
		 */
		while (page < end) {
			set_page_count(page, 1);
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
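/*
 * Typical driver use (illustrative only; 'mydev' and BUF_SIZE are
 * hypothetical names, not part of this file):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(&mydev->dev, BUF_SIZE, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with 'dma', access the buffer via 'buf' ...
 *	dma_free_coherent(&mydev->dev, BUF_SIZE, buf, dma);
 */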

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

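/*
 * Back an mmap() of a coherent buffer: look up the vm_region for
 * 'cpu_addr' and remap the pages underlying it into the caller's vma,
 * honouring vma->vm_pgoff as a page offset into the buffer.
 */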
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			vma->vm_flags |= VM_RESERVED;
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
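/*
 * A driver would normally call these from its fops->mmap handler, e.g.
 * (illustrative sketch; 'foo_priv' and its fields are hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_addr, priv->size);
 *	}
 */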

/*
 * Free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;
	int idx;
	u32 off;

	WARN_ON(irqs_disabled());

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

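	/*
	 * Mark the region inactive while the lock is still held, then
	 * drop the lock for the page-table teardown: vm_region_find()
	 * ignores inactive regions, so nothing can race with this free,
	 * and the potentially long PTE walk runs with IRQs enabled.
	 */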
	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0, i = 0;
	u32 base = CONSISTENT_BASE;

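	/*
	 * Walk the consistent region in PGDIR-sized (2MiB) steps,
	 * allocating one kernel PTE table per step and caching it in
	 * consistent_pte[] for the allocator above.
	 */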
	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
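/*
 * core_initcall() runs this well before the device/driver initcalls, so
 * the consistent page tables exist by the time drivers start allocating
 * DMA buffers.
 */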

/*
 * Make an area consistent for devices.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(consistent_sync);