/*
 * arch/x86/vdso/vma.c
 */
1/*
2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2
5 */
6#include <linux/mm.h>
4e950f6f 7#include <linux/err.h>
2aae950b 8#include <linux/sched.h>
5a0e3ad6 9#include <linux/slab.h>
2aae950b
AK
10#include <linux/init.h>
11#include <linux/random.h>
3fa89ca7 12#include <linux/elf.h>
2aae950b
AK
13#include <asm/vsyscall.h>
14#include <asm/vgtod.h>
15#include <asm/proto.h>
7f3646aa 16#include <asm/vdso.h>
aafade24 17#include <asm/page.h>
18d0a6fd 18#include <asm/hpet.h>
2aae950b 19
#if defined(CONFIG_X86_64)
/* "vdso=" boot-parameter value; 1 (the default) maps the 64-bit vDSO. */
unsigned int __read_mostly vdso64_enabled = 1;

/*
 * NOTE(review): nothing in this file reads vdso_sync_cpuid — presumably a
 * symbol defined inside the vDSO image itself; confirm it is still needed.
 */
extern unsigned short vdso_sync_cpuid;
#endif
1a21d4e0 25
6f121e54 26void __init init_vdso_image(const struct vdso_image *image)
1a21d4e0 27{
1a21d4e0 28 int i;
6f121e54 29 int npages = (image->size) / PAGE_SIZE;
1a21d4e0 30
6f121e54
AL
31 BUG_ON(image->size % PAGE_SIZE != 0);
32 for (i = 0; i < npages; i++)
a62c34bd
AL
33 image->text_mapping.pages[i] =
34 virt_to_page(image->data + i*PAGE_SIZE);
1a21d4e0 35
6f121e54
AL
36 apply_alternatives((struct alt_instr *)(image->data + image->alt),
37 (struct alt_instr *)(image->data + image->alt +
38 image->alt_len));
1a21d4e0 39}
1b3f2a72 40
6f121e54 41#if defined(CONFIG_X86_64)
aafade24 42static int __init init_vdso(void)
2aae950b 43{
6f121e54 44 init_vdso_image(&vdso_image_64);
2aae950b 45
1a21d4e0 46#ifdef CONFIG_X86_X32_ABI
6f121e54 47 init_vdso_image(&vdso_image_x32);
1a21d4e0
L
48#endif
49
2aae950b 50 return 0;
2aae950b 51}
aafade24 52subsys_initcall(init_vdso);
18d0a6fd 53#endif
2aae950b
AK
54
55struct linux_binprm;
56
57/* Put the vdso above the (randomized) stack with another randomized offset.
58 This way there is no hole in the middle of address space.
59 To save memory make sure it is still in the same PTE as the stack top.
18d0a6fd
AL
60 This doesn't give that many random bits.
61
62 Only used for the 64-bit and x32 vdsos. */
2aae950b
AK
63static unsigned long vdso_addr(unsigned long start, unsigned len)
64{
d093601b
JB
65#ifdef CONFIG_X86_32
66 return 0;
67#else
2aae950b
AK
68 unsigned long addr, end;
69 unsigned offset;
70 end = (start + PMD_SIZE - 1) & PMD_MASK;
d9517346
IM
71 if (end >= TASK_SIZE_MAX)
72 end = TASK_SIZE_MAX;
2aae950b
AK
73 end -= len;
74 /* This loses some more bits than a modulo, but is cheaper */
75 offset = get_random_int() & (PTRS_PER_PTE - 1);
76 addr = start + (offset << PAGE_SHIFT);
77 if (addr >= end)
78 addr = end;
dfb09f9b
BP
79
80 /*
81 * page-align it here so that get_unmapped_area doesn't
82 * align it wrongfully again to the next page. addr can come in 4K
83 * unaligned here as a result of stack start randomization.
84 */
85 addr = PAGE_ALIGN(addr);
f9902472 86 addr = align_vdso_addr(addr);
dfb09f9b 87
2aae950b 88 return addr;
d093601b 89#endif
2aae950b
AK
90}
91
18d0a6fd 92static int map_vdso(const struct vdso_image *image, bool calculate_addr)
2aae950b
AK
93{
94 struct mm_struct *mm = current->mm;
18d0a6fd 95 struct vm_area_struct *vma;
e6577a7c 96 unsigned long addr, text_start;
18d0a6fd 97 int ret = 0;
1e844fb4 98 static struct page *no_pages[] = {NULL};
a62c34bd
AL
99 static struct vm_special_mapping vvar_mapping = {
100 .name = "[vvar]",
101 .pages = no_pages,
102 };
2aae950b 103
18d0a6fd
AL
104 if (calculate_addr) {
105 addr = vdso_addr(current->mm->start_stack,
e6577a7c 106 image->size - image->sym_vvar_start);
18d0a6fd
AL
107 } else {
108 addr = 0;
109 }
2aae950b
AK
110
111 down_write(&mm->mmap_sem);
18d0a6fd 112
e6577a7c
AL
113 addr = get_unmapped_area(NULL, addr,
114 image->size - image->sym_vvar_start, 0, 0);
2aae950b
AK
115 if (IS_ERR_VALUE(addr)) {
116 ret = addr;
117 goto up_fail;
118 }
119
e6577a7c
AL
120 text_start = addr - image->sym_vvar_start;
121 current->mm->context.vdso = (void __user *)text_start;
f7b6eb3f 122
18d0a6fd
AL
123 /*
124 * MAYWRITE to allow gdb to COW and set breakpoints
125 */
a62c34bd 126 vma = _install_special_mapping(mm,
e6577a7c 127 text_start,
a62c34bd
AL
128 image->size,
129 VM_READ|VM_EXEC|
130 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
131 &image->text_mapping);
18d0a6fd 132
a62c34bd
AL
133 if (IS_ERR(vma)) {
134 ret = PTR_ERR(vma);
18d0a6fd 135 goto up_fail;
a62c34bd 136 }
18d0a6fd
AL
137
138 vma = _install_special_mapping(mm,
e6577a7c
AL
139 addr,
140 -image->sym_vvar_start,
ac379835 141 VM_READ|VM_MAYREAD,
a62c34bd 142 &vvar_mapping);
18d0a6fd
AL
143
144 if (IS_ERR(vma)) {
145 ret = PTR_ERR(vma);
2aae950b 146 goto up_fail;
f7b6eb3f 147 }
2aae950b 148
18d0a6fd
AL
149 if (image->sym_vvar_page)
150 ret = remap_pfn_range(vma,
e6577a7c 151 text_start + image->sym_vvar_page,
18d0a6fd
AL
152 __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
153 PAGE_SIZE,
154 PAGE_READONLY);
155
156 if (ret)
157 goto up_fail;
158
159#ifdef CONFIG_HPET_TIMER
160 if (hpet_address && image->sym_hpet_page) {
161 ret = io_remap_pfn_range(vma,
e6577a7c 162 text_start + image->sym_hpet_page,
18d0a6fd
AL
163 hpet_address >> PAGE_SHIFT,
164 PAGE_SIZE,
165 pgprot_noncached(PAGE_READONLY));
166
167 if (ret)
168 goto up_fail;
169 }
170#endif
171
2aae950b 172up_fail:
18d0a6fd
AL
173 if (ret)
174 current->mm->context.vdso = NULL;
175
2aae950b
AK
176 up_write(&mm->mmap_sem);
177 return ret;
178}
179
18d0a6fd
AL
180#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
/*
 * Map the currently selected 32-bit vDSO at a kernel-chosen address and,
 * if the image exports VDSO32_SYSENTER_RETURN, record that address so
 * the sysenter path can return into the vDSO.
 *
 * Returns 0 on success (including when the vDSO is disabled) or a
 * negative errno from map_vdso().
 */
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	/* 32-bit vDSO is never address-randomized by this path. */
	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
199#endif
200
201#ifdef CONFIG_X86_64
1a21d4e0
L
202int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
203{
18d0a6fd
AL
204 if (!vdso64_enabled)
205 return 0;
206
207 return map_vdso(&vdso_image_64, true);
1a21d4e0
L
208}
209
18d0a6fd
AL
210#ifdef CONFIG_COMPAT
/*
 * exec-time hook for compat processes: x32 tasks get the x32 vDSO
 * (honoring vdso64_enabled, since x32 shares the 64-bit vDSO switch);
 * all other compat tasks fall back to the 32-bit vDSO path.
 */
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
225#endif
226#else
/* Native 32-bit kernels: the only vDSO to map is the 32-bit one. */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
231#endif
232
18d0a6fd 233#ifdef CONFIG_X86_64
2aae950b
AK
234static __init int vdso_setup(char *s)
235{
3d7ee969 236 vdso64_enabled = simple_strtoul(s, NULL, 0);
2aae950b
AK
237 return 0;
238}
239__setup("vdso=", vdso_setup);
b4b541a6 240#endif
/* (removed git web-viewer footer from scraped source) */