/* arch/x86/vdso/vma.c */
1/*
2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2
5 */
6#include <linux/mm.h>
4e950f6f 7#include <linux/err.h>
2aae950b
AK
8#include <linux/sched.h>
9#include <linux/init.h>
10#include <linux/random.h>
11#include <asm/vsyscall.h>
12#include <asm/vgtod.h>
13#include <asm/proto.h>
7f3646aa 14#include <asm/vdso.h>
2aae950b 15
7f3646aa 16#include "vextern.h" /* Just for VMAGIC. */
2aae950b
AK
17#undef VEXTERN
18
/* Runtime switch: nonzero maps the vDSO into new processes; settable via
   the "vdso=" boot parameter (see vdso_setup below). */
unsigned int __read_mostly vdso_enabled = 1;

/* Start/end of the raw vDSO image, provided by the linker script. */
extern char vdso_start[], vdso_end[];
/* NOTE(review): declared but not referenced in this file — presumably one of
   the symbols patched via vextern.h; confirm against the vDSO linker script. */
extern unsigned short vdso_sync_cpuid;

/* Pages holding the patched copy of the vDSO image; filled by init_vdso_vars
   and handed to install_special_mapping at exec time. */
struct page **vdso_pages;
25
7f3646aa 26static inline void *var_ref(void *p, char *name)
2aae950b 27{
2aae950b
AK
28 if (*(void **)p != (void *)VMAGIC) {
29 printk("VDSO: variable %s broken\n", name);
30 vdso_enabled = 0;
31 }
32 return p;
33}
34
35static int __init init_vdso_vars(void)
36{
37 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
38 int i;
39 char *vbase;
40
41 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
42 if (!vdso_pages)
43 goto oom;
44 for (i = 0; i < npages; i++) {
45 struct page *p;
46 p = alloc_page(GFP_KERNEL);
47 if (!p)
48 goto oom;
49 vdso_pages[i] = p;
50 copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
51 }
52
53 vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
54 if (!vbase)
55 goto oom;
56
57 if (memcmp(vbase, "\177ELF", 4)) {
58 printk("VDSO: I'm broken; not ELF\n");
59 vdso_enabled = 0;
60 }
61
2aae950b 62#define VEXTERN(x) \
7f3646aa 63 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
2aae950b
AK
64#include "vextern.h"
65#undef VEXTERN
66 return 0;
67
68 oom:
69 printk("Cannot allocate vdso\n");
70 vdso_enabled = 0;
71 return -ENOMEM;
72}
73__initcall(init_vdso_vars);
74
75struct linux_binprm;
76
77/* Put the vdso above the (randomized) stack with another randomized offset.
78 This way there is no hole in the middle of address space.
79 To save memory make sure it is still in the same PTE as the stack top.
80 This doesn't give that many random bits */
81static unsigned long vdso_addr(unsigned long start, unsigned len)
82{
83 unsigned long addr, end;
84 unsigned offset;
85 end = (start + PMD_SIZE - 1) & PMD_MASK;
86 if (end >= TASK_SIZE64)
87 end = TASK_SIZE64;
88 end -= len;
89 /* This loses some more bits than a modulo, but is cheaper */
90 offset = get_random_int() & (PTRS_PER_PTE - 1);
91 addr = start + (offset << PAGE_SHIFT);
92 if (addr >= end)
93 addr = end;
94 return addr;
95}
96
97/* Setup a VMA at program startup for the vsyscall page.
98 Not called for compat tasks */
99int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
100{
101 struct mm_struct *mm = current->mm;
102 unsigned long addr;
103 int ret;
104 unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE);
105
106 if (!vdso_enabled)
107 return 0;
108
109 down_write(&mm->mmap_sem);
110 addr = vdso_addr(mm->start_stack, len);
111 addr = get_unmapped_area(NULL, addr, len, 0, 0);
112 if (IS_ERR_VALUE(addr)) {
113 ret = addr;
114 goto up_fail;
115 }
116
117 ret = install_special_mapping(mm, addr, len,
118 VM_READ|VM_EXEC|
119 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
120 VM_ALWAYSDUMP,
121 vdso_pages);
122 if (ret)
123 goto up_fail;
124
125 current->mm->context.vdso = (void *)addr;
126up_fail:
127 up_write(&mm->mmap_sem);
128 return ret;
129}
130
131static __init int vdso_setup(char *s)
132{
133 vdso_enabled = simple_strtoul(s, NULL, 0);
134 return 0;
135}
136__setup("vdso=", vdso_setup);
/* End of arch/x86/vdso/vma.c */