/*
 * linux/arch/arm/mm/mmap.c
 */
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>

#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
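
/*
 * (Editor's note) A minimal worked example, assuming SHMLBA is
 * 4 * PAGE_SIZE (16KB) and PAGE_SHIFT is 12, as on ARMv6: for
 * addr = 0x8401000 and pgoff = 3, the first term rounds addr up to
 * the next 16KB boundary, 0x8404000, and the second adds the page's
 * cache colour, (3 << 12) & 0x3fff = 0x3000, giving 0x8407000.
 * Every mapping of page 3 of the object thus lands at the same
 * offset within a 16KB window, so it cannot alias in a VIPT cache.
 */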

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 11 and 23 of the
	 * cache type register.
	 */
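	/*
	 * (Editor's note, assuming the ARMv6 cache type register
	 * layout) Isize occupies CTR bits [11:0] and Dsize bits
	 * [23:12], each field carrying an aliasing "P" bit at bit 11
	 * of the field.  OR-ing the register with itself shifted
	 * right by 12 folds the Dsize P bit onto bit 11, so a single
	 * mask tests both P bits at once.
	 */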
	cache_type = read_cpuid(CPUID_CACHETYPE);
	if (cache_type != read_cpuid(CPUID_ID)) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We should enforce the MAP_FIXED case.  However, currently
	 * the generic kernel code doesn't allow us to handle this.
	 */
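	/*
	 * (Editor's note) With MAP_FIXED the address cannot be moved,
	 * so on an aliasing cache the only safe option is to refuse a
	 * shared mapping that does not start on an SHMLBA boundary;
	 * accepting it could leave the same data live in two
	 * different cache colours.
	 */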
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
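	/*
	 * (Editor's note) mm->free_area_cache remembers where the
	 * previous search left off, and mm->cached_hole_size the
	 * largest hole that search skipped.  A request bigger than
	 * that hole cannot fit below the cached position, so it may
	 * resume there; anything smaller starts over from
	 * TASK_UNMAPPED_BASE.
	 */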
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
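			/*
			 * (Editor's note) Holes can be missed when the
			 * scan began at mm->free_area_cache rather than
			 * at TASK_UNMAPPED_BASE: nothing below the
			 * cached position was examined this time round.
			 */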
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
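		/*
		 * (Editor's note) Track the largest hole stepped over:
		 * a later request larger than this can safely resume
		 * from mm->free_area_cache, while a smaller one will
		 * restart from TASK_UNMAPPED_BASE to reuse the hole.
		 */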
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
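
/*
 * (Editor's note) The generic allocator in mm/mmap.c only provides its
 * own arch_get_unmapped_area() when an architecture does not; ARM of
 * this era opted in by defining HAVE_ARCH_UNMAPPED_AREA in its
 * pgtable.h, which is what routes get_unmapped_area() to this function.
 */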