/* linux/arch/x86_64/kernel/vsyscall.c */
1/*
2 * linux/arch/x86_64/kernel/vsyscall.c
3 *
4 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Copyright 2003 Andi Kleen, SuSE Labs.
6 *
 * Thanks to hpa@transmeta.com for some useful hints.
8 * Special thanks to Ingo Molnar for his early experience with
9 * a different vsyscall implementation for Linux/IA32 and for the name.
10 *
11 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
12 * at virtual address -10Mbyte+1024bytes etc... There are at max 4
13 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
14 * jumping out of line if necessary. We cannot add more with this
15 * mechanism because older kernels won't return -ENOSYS.
16 * If we want more than four we need a vDSO.
17 *
18 * Note: the concept clashes with user mode linux. If you use UML and
19 * want per guest time just set the kernel.vsyscall64 sysctl to 0.
20 */
21
22#include <linux/time.h>
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/timer.h>
26#include <linux/seqlock.h>
27#include <linux/jiffies.h>
28#include <linux/sysctl.h>
c08c8205 29#include <linux/getcpu.h>
1da177e4
LT
30
31#include <asm/vsyscall.h>
32#include <asm/pgtable.h>
33#include <asm/page.h>
34#include <asm/fixmap.h>
35#include <asm/errno.h>
36#include <asm/io.h>
c08c8205
VP
37#include <asm/segment.h>
38#include <asm/desc.h>
39#include <asm/topology.h>
1da177e4
LT
40
/* Place a function into vsyscall slot nr: each slot is a 1024-byte
   region in the ".vsyscall_<nr>" section, laid out at a fixed address
   (-10MB + nr*1024) by the linker script. */
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))

/* Fast-path enable flag, toggled through the kernel.vsyscall64 sysctl
   (see vsyscall_sysctl_change below).  Lives in its own section so it
   is readable from the user-visible vsyscall page. */
int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
/* Guards the time variables read by do_vgettimeofday()/vtime(). */
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
/* VGETCPU_RDTSCP selects the RDTSCP path in vgetcpu(); any other value
   uses the GDT-limit fallback.  Initialized elsewhere — TODO confirm
   where it is set. */
int __vgetcpu_mode __section_vgetcpu_mode;
1da177e4
LT
46
47#include <asm/unistd.h>
48
/* Canonicalize a timeval: fold any whole seconds accumulated in
   tv_usec into tv_sec, leaving tv_usec in [0, 1000000). */
static __always_inline void timeval_normalize(struct timeval * tv)
{
	time_t overflow_sec = tv->tv_usec / 1000000;

	if (overflow_sec) {
		tv->tv_sec += overflow_sec;
		tv->tv_usec %= 1000000;
	}
}
59
/*
 * Fast gettimeofday: compute the current time entirely in user context.
 *
 * Retries on __xtime_lock until a consistent snapshot is read.  Starts
 * from the last recorded xtime plus the jiffies elapsed since it was
 * updated, then adds a fine-grained correction from either the TSC or
 * the HPET counter, depending on __vxtime.mode.
 */
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);

		sec = __xtime.tv_sec;
		usec = (__xtime.tv_nsec / 1000) +
			(__jiffies - __wall_jiffies) * (1000000 / HZ);

		if (__vxtime.mode != VXTIME_HPET) {
			t = get_cycles_sync();
			/* Clamp so a TSC reading slightly behind the last
			   recorded value cannot make time go backwards. */
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			/* tsc_quot is a 32.32 fixed-point usec-per-cycle
			   factor; the >> 32 extracts the integer part. */
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			/* HPET main counter register at offset 0xf0 of the
			   fixmapped HPET MMIO page. */
			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	/* usec may have accumulated more than one second; normalize. */
	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
88
/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
/* Copy the kernel's cached timezone; read without locking (see the
   RED-PEN note above). */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __sys_tz;
}
94
/*
 * Slow path: issue a real gettimeofday(2) system call.
 * The "vsysc2:" label marks the syscall instruction so that
 * vsyscall_sysctl_change() can patch it (SYSCALL <-> two-byte NOP)
 * when the vsyscall64 sysctl is flipped.
 */
static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
	return ret;
}
103
/*
 * Slow path: issue a real time(2) system call.
 * The "vsysc1:" label is the second patch site used by
 * vsyscall_sysctl_change().
 */
static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}
112
/* Userspace entry point, vsyscall slot 0.  Falls back to the real
   system call when the vsyscall64 sysctl is off; both tv and tz may
   be NULL. */
int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (!__sysctl_vsyscall)
		return gettimeofday(tv,tz);
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}
123
/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
/* Userspace entry point, vsyscall slot 1: time(2) without entering the
   kernel.  Reads the cached xtime seconds; falls back to the real
   syscall when the vsyscall64 sysctl is off.  t may be NULL. */
time_t __vsyscall(1) vtime(time_t *t)
{
	if (!__sysctl_vsyscall)
		return time_syscall(t);
	else if (t)
		*t = __xtime.tv_sec;
	return __xtime.tv_sec;
}
134
/* Fast way to get current CPU and node.
   This helps to do per node and per CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two element sized long array.
   All arguments can be NULL. */
/* Userspace entry point, vsyscall slot 2.  The per-CPU value p is
   encoded as (node << 12) | cpu, written either into TSC_AUX (read
   back with rdtscp) or into a GDT segment limit (read back with lsl)
   by vsyscall_set_cpu(). */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int dummy, p;
	unsigned long j = 0;

	/* Fast cache - only recompute value once per jiffies and avoid
	   relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyways.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it pass NULL. */
	if (tcache && tcache->t0 == (j = __jiffies)) {
		p = tcache->t1;
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		rdtscp(dummy, dummy, p);
	} else {
		/* Load per CPU data from GDT */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	/* Refresh the cache with this jiffy's value. */
	if (tcache) {
		tcache->t0 = j;
		tcache->t1 = p;
	}
	/* Decode: low 12 bits are the CPU, the rest is the node. */
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}
176
/* vsyscall slot 3: unused/reserved, always fails with -ENOSYS (see the
   note in the file header about why slots cannot be added). */
long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}
181
182#ifdef CONFIG_SYSCTL
183
/* Little-endian u16 encodings of the two-byte x86 "syscall" opcode
   (0f 05) and of two one-byte NOPs (90 90), written over the
   vsysc1/vsysc2 patch sites below. */
#define SYSCALL 0x050f
#define NOP2 0x9090
186
/*
 * NOP out syscall in vsyscall page when not needed.
 */
/* proc handler for kernel.vsyscall64.  After proc_dointvec() updates
   sysctl_vsyscall, rewrite the two syscall instructions in the vsyscall
   page: restore real SYSCALLs when the fast path is disabled, replace
   them with NOPs when it is enabled (the syscall branch is then never
   taken anyway).  The page is write-protected, so the patch sites are
   reached through a fresh ioremap() of their physical addresses. */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;	/* asm labels in the vsyscall page */
	u16 *map1, *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this
	   way. */
	map1 = ioremap(__pa_symbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_symbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	if (!sysctl_vsyscall) {
		*map1 = SYSCALL;
		*map2 = SYSCALL;
	} else {
		*map1 = NOP2;
		*map2 = NOP2;
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}
220
/* sysctl strategy hook: refuse the binary sysctl(2) interface for
   vsyscall64 — only the /proc/sys path (proc_handler) is supported. */
static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen,
				void **context)
{
	return -ENOSYS;
}
228
/* /proc/sys/kernel/vsyscall64: int toggle backed by sysctl_vsyscall,
   with writes post-processed by vsyscall_sysctl_change(). */
static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{ 0, }
};
236
/* Root table attaching kernel_table2 under the "kernel" directory. */
static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{ 0 },
};
242
243#endif
244
c08c8205
VP
/* smp_call_function_single callback: store the encoded cpu+node value
   (smuggled through the info pointer) into the RDTSCP auxiliary MSR of
   the CPU it runs on. */
static void __cpuinit write_rdtscp_cb(void *info)
{
	write_rdtscp_aux((unsigned long)info);
}
249
/* Per-CPU setup for vgetcpu(): publish this CPU's number and NUMA node
   so user space can read them, via RDTSCP when the CPU has it and via
   an LSL on a dedicated GDT entry as the fallback. */
void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long *d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node[cpu];
#endif
	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
		/* Same encoding vgetcpu() decodes: node in bits 12 and
		   up, cpu in the low 12 bits. */
		void *info = (void *)((node << 12) | cpu);
		/* Can happen on preemptive kernel */
		if (get_cpu() == cpu)
			write_rdtscp_cb(info);
#ifdef CONFIG_SMP
		else {
			/* the notifier is unfortunately not executed on the
			   target CPU */
			smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
		}
#endif
		put_cpu();
	}

	/* Store cpu number in limit so that it can be loaded quickly
	   in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
	/* NOTE(review): 0x0f40000000000ULL is taken to be a segment
	   descriptor template whose limit field then receives cpu and
	   the split node bits (low 4 node bits at bit 12, upper node
	   bits in the limit[19:16] nibble at bit 48) — confirm against
	   the x86 GDT descriptor layout. */
	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
	*d = 0x0f40000000000ULL;
	*d |= cpu;
	*d |= (node & 0xf) << 12;
	*d |= (node >> 4) << 48;
}
281
1da177e4
LT
/* Map the vsyscall page (linker symbol __vsyscall_0) at its fixmap
   slot with PAGE_KERNEL_VSYSCALL protections so user space can
   execute it. */
static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}
289
/* Boot-time init: verify the linker placed every vsyscall entry at its
   ABI-fixed address (these addresses are user-visible and must never
   move), map the page, and register the vsyscall64 sysctl. */
static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
			VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
	map_vsyscall();
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2, 0);
#endif
	return 0;
}

__initcall(vsyscall_init);