/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

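/*
 * Fill in the page array backing a vDSO image from its static data and
 * apply alternative-instruction patching to the image text.
 */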
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

#if defined(CONFIG_X86_64)
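/*
 * Boot-time initcall: prepare the 64-bit vDSO image (and the x32 image
 * when CONFIG_X86_X32_ABI is enabled).
 */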
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area doesn't
	 * align it wrongly again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

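/*
 * Map the vDSO text and the vvar area into the current process.  The vvar
 * area sits immediately below the text (sym_vvar_start is negative); the
 * shared vvar page and, optionally, the HPET registers are remapped into
 * it.  If calculate_addr is true, a randomized address near the stack is
 * chosen via vdso_addr().
 */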
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

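	/* Remap the shared vvar data page read-only into the vvar area. */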
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      text_start + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

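	/* If an HPET is present, expose its registers uncached and read-only. */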
#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
					 text_start + image->sym_hpet_page,
					 hpet_address >> PAGE_SHIFT,
					 PAGE_SIZE,
					 pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
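/*
 * Map the 32-bit vDSO for native 32-bit and compat tasks, and record the
 * sysenter return address when the selected image provides one.
 */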
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
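/*
 * Called from the ELF loader at exec time to map the 64-bit vDSO, unless
 * it has been disabled with vdso=0.
 */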
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
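/*
 * exec-time vDSO setup for compat tasks: x32 tasks get the x32 image,
 * everything else falls back to the 32-bit vDSO.
 */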
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
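/* On 32-bit kernels the ELF loader simply maps the 32-bit vDSO. */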
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
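/*
 * "vdso=" boot parameter: a non-zero value enables the 64-bit vDSO,
 * zero disables it.
 */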
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif