/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a
	 * result of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}
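
/*
 * Worked example of the placement above (illustrative only; the numbers
 * are hypothetical): with a stack top of 0x7ffd12356789 and len = 3 pages,
 *
 *	start  = PAGE_ALIGN(0x7ffd12356789)              = 0x7ffd12357000
 *	end    = ((start + 0x3000 + PMD_SIZE - 1)
 *		  & PMD_MASK) - 0x3000                   = 0x7ffd123fd000
 *	offset = random page index in 0 .. 0xa6 (167 choices)
 *	addr   = start + (offset << PAGE_SHIFT)
 *
 * Because end is derived from the next PMD boundary while start is only
 * page-aligned, a start late in a PMD leaves fewer candidate pages, all
 * of them near the boundary; that is the bias toward the end of the PMD
 * noted in the comment above.
 */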

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);

	return 0;
}

static const struct vm_special_mapping text_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
};

static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}
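
/*
 * Illustration of the sym_offset arithmetic above (hypothetical numbers,
 * not the real image layout): if an image had sym_vvar_start = -0x3000,
 * sym_vvar_page = -0x3000 and sym_pvclock_page = -0x2000, a fault at
 * pgoff 0 of the [vvar] VMA would yield sym_offset = -0x3000 (the vvar
 * page) and pgoff 1 would yield -0x2000 (the pvclock page).  sym_offset
 * == 0 would require pgoff 3, one page past the 3-page mapping, which is
 * why the zero check above should be unreachable.
 */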

static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
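
/*
 * Resulting layout, sketched for reference (sym_vvar_start is negative,
 * so the vvar area sits directly below the vdso text):
 *
 *	addr                          -> [vvar]  -image->sym_vvar_start bytes
 *	addr - image->sym_vvar_start  -> [vdso]  image->size bytes
 *
 * This is an illustration derived from the code above, not an additional
 * guarantee about the layout.
 */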

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store the cpu number in the segment limit so that it can be
	 * loaded quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
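
/*
 * Sketch of the user-space side (not part of this file; the real code
 * lives in the vdso's __vdso_getcpu()/__getcpu()): the values stored
 * above can be read back with a single LSL on the per-cpu segment
 * selector, roughly:
 *
 *	unsigned int p;
 *	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 *	cpu  = p & 0xfff;	// low 12 bits: CPU number
 *	node = p >> 12;		// next 8 bits: node number
 *
 * LSL loads a segment limit without faulting, which is why the data is
 * smuggled through a GDT descriptor here.
 */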

static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */