arch/x86/vdso/vdso32-setup.c
/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
#include <asm/vdso.h>
#include <asm/proto.h>
#include <asm/fixmap.h>
#include <asm/hpet.h>
#include <asm/vvar.h>

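/*
 * Default for the vdso_enabled knob below: with CONFIG_COMPAT_VDSO the
 * 32-bit vDSO is disabled by default (it can still be enabled at boot
 * with vdso32=1); otherwise it is enabled.
 *
 * On 64-bit kernels the knob is backed by the abi.vsyscall32 sysctl,
 * and this file's arch_setup_additional_pages() is built as
 * syscall32_setup_pages() for compat (32-bit) tasks.
 */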
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT 0
#else
#define VDSO_DEFAULT 1
#endif

#ifdef CONFIG_X86_64
#define vdso_enabled sysctl_vsyscall32
#define arch_setup_additional_pages syscall32_setup_pages
#endif

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;

static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);

        if (vdso_enabled > 1)
                pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");

        return 1;
}

/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso_setup);

#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso32_setup, vdso_setup, 0);

EXPORT_SYMBOL_GPL(vdso_enabled);
#endif

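/*
 * Page array and size (in bytes) of the 32-bit vDSO image selected by
 * sysenter_setup().
 */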
static struct page **vdso32_pages;
static unsigned vdso32_size;

#ifdef CONFIG_X86_64

#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall() (boot_cpu_has(X86_FEATURE_SYSCALL32))

/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
        /* Load these always in case some future AMD CPU supports
           SYSENTER from compat mode too. */
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
        wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

        wrmsrl(MSR_CSTAR, ia32_cstar_target);
}

#else  /* CONFIG_X86_32 */

#define vdso32_sysenter() (boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall() (0)

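/*
 * Program the SYSENTER MSRs for the current CPU so that the sysenter
 * entry path (ia32_sysenter_target) works; a no-op on CPUs without SEP.
 */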
void enable_sep_cpu(void)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                put_cpu();
                return;
        }

        tss->x86_tss.ss1 = __KERNEL_CS;
        tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
        wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
        put_cpu();
}

#endif  /* CONFIG_X86_64 */

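/*
 * Pick the 32-bit vDSO image matching the CPU (syscall, sysenter or
 * plain int80), record its pages in vdso32_pages/vdso32_size and apply
 * the vdso32 patching via patch_vdso32().
 */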
int __init sysenter_setup(void)
{
        char *vdso32_start, *vdso32_end;
        int npages, i;

#ifdef CONFIG_COMPAT
        if (vdso32_syscall()) {
                vdso32_start = vdso32_syscall_start;
                vdso32_end = vdso32_syscall_end;
                vdso32_pages = vdso32_syscall_pages;
        } else
#endif
        if (vdso32_sysenter()) {
                vdso32_start = vdso32_sysenter_start;
                vdso32_end = vdso32_sysenter_end;
                vdso32_pages = vdso32_sysenter_pages;
        } else {
                vdso32_start = vdso32_int80_start;
                vdso32_end = vdso32_int80_end;
                vdso32_pages = vdso32_int80_pages;
        }

        npages = ((vdso32_end - vdso32_start) + PAGE_SIZE - 1) / PAGE_SIZE;
        vdso32_size = npages << PAGE_SHIFT;
        for (i = 0; i < npages; i++)
                vdso32_pages[i] = virt_to_page(vdso32_start + i*PAGE_SIZE);

        patch_vdso32(vdso32_start, vdso32_size);

        return 0;
}

/* Setup a VMA at program startup for the vsyscall page */
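/*
 * Layout: the vDSO text is mapped at an address returned by
 * get_unmapped_area(), with VDSO_PREV_PAGES read-only pages (the vvar
 * page and, when available, the HPET mapping) placed directly below it.
 */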
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        struct vm_area_struct *vma;

#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32))
                return x32_setup_additional_pages(bprm, uses_interp);
#endif

        if (vdso_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        down_write(&mm->mmap_sem);

        addr = get_unmapped_area(NULL, 0, vdso32_size + VDSO_OFFSET(VDSO_PREV_PAGES), 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        addr += VDSO_OFFSET(VDSO_PREV_PAGES);

        current->mm->context.vdso = (void *)addr;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        ret = install_special_mapping(mm,
                        addr,
                        vdso32_size,
                        VM_READ|VM_EXEC|
                        VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                        vdso32_pages);

        if (ret)
                goto up_fail;

        vma = _install_special_mapping(mm,
                        addr - VDSO_OFFSET(VDSO_PREV_PAGES),
                        VDSO_OFFSET(VDSO_PREV_PAGES),
                        VM_READ,
                        NULL);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        ret = remap_pfn_range(vma,
                        addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
                        __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
                        PAGE_SIZE,
                        PAGE_READONLY);

        if (ret)
                goto up_fail;

#ifdef CONFIG_HPET_TIMER
        if (hpet_address) {
                ret = io_remap_pfn_range(vma,
                                addr - VDSO_OFFSET(VDSO_HPET_PAGE),
                                hpet_address >> PAGE_SHIFT,
                                PAGE_SIZE,
                                pgprot_noncached(PAGE_READONLY));

                if (ret)
                        goto up_fail;
        }
#endif

        current_thread_info()->sysenter_return =
                VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);

        return ret;
}

#ifdef CONFIG_X86_64

subsys_initcall(sysenter_setup);

#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

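/*
 * abi.vsyscall32 (/proc/sys/abi/vsyscall32) toggles the 32-bit vDSO for
 * subsequently exec'ed compat tasks at runtime.
 */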
static struct ctl_table abi_table2[] = {
        {
                .procname = "vsyscall32",
                .data = &sysctl_vsyscall32,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec
        },
        {}
};

static struct ctl_table abi_root_table2[] = {
        {
                .procname = "abi",
                .mode = 0555,
                .child = abi_table2
        },
        {}
};

static __init int ia32_binfmt_init(void)
{
        register_sysctl_table(abi_root_table2);
        return 0;
}
__initcall(ia32_binfmt_init);
#endif

#else  /* CONFIG_X86_32 */

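/*
 * On 32-bit kernels the vDSO is the only special mapping: arch_vma_name()
 * labels it "[vdso]" and the gate-area helpers report that no separate
 * gate (vsyscall) page exists.
 */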
const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return NULL;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return 0;
}

int in_gate_area_no_mm(unsigned long addr)
{
        return 0;
}

#endif  /* CONFIG_X86_64 */