/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
14 #include <linux/linkage.h>
15 #include <linux/compat.h>
16 #include <linux/errno.h>
17 #include <linux/time.h>
18 #include <linux/signal.h>
19 #include <linux/sched.h> /* for MAX_SCHEDULE_TIMEOUT */
20 #include <linux/syscalls.h>
21 #include <linux/unistd.h>
22 #include <linux/security.h>
23 #include <linux/timex.h>
24 #include <linux/migrate.h>
25 #include <linux/posix-timers.h>
27 #include <asm/uaccess.h>
29 extern void sigset_from_compat(sigset_t
*set
, compat_sigset_t
*compat
);
31 int get_compat_timespec(struct timespec
*ts
, const struct compat_timespec __user
*cts
)
33 return (!access_ok(VERIFY_READ
, cts
, sizeof(*cts
)) ||
34 __get_user(ts
->tv_sec
, &cts
->tv_sec
) ||
35 __get_user(ts
->tv_nsec
, &cts
->tv_nsec
)) ? -EFAULT
: 0;
38 int put_compat_timespec(const struct timespec
*ts
, struct compat_timespec __user
*cts
)
40 return (!access_ok(VERIFY_WRITE
, cts
, sizeof(*cts
)) ||
41 __put_user(ts
->tv_sec
, &cts
->tv_sec
) ||
42 __put_user(ts
->tv_nsec
, &cts
->tv_nsec
)) ? -EFAULT
: 0;
45 static long compat_nanosleep_restart(struct restart_block
*restart
)
47 unsigned long expire
= restart
->arg0
, now
= jiffies
;
48 struct compat_timespec __user
*rmtp
;
50 /* Did it expire while we handled signals? */
51 if (!time_after(expire
, now
))
54 expire
= schedule_timeout_interruptible(expire
- now
);
58 rmtp
= (struct compat_timespec __user
*)restart
->arg1
;
60 struct compat_timespec ct
;
63 jiffies_to_timespec(expire
, &t
);
65 ct
.tv_nsec
= t
.tv_nsec
;
66 if (copy_to_user(rmtp
, &ct
, sizeof(ct
)))
69 /* The 'restart' block is already filled in */
70 return -ERESTART_RESTARTBLOCK
;
73 asmlinkage
long compat_sys_nanosleep(struct compat_timespec __user
*rqtp
,
74 struct compat_timespec __user
*rmtp
)
77 struct restart_block
*restart
;
80 if (get_compat_timespec(&t
, rqtp
))
83 if ((t
.tv_nsec
>= 1000000000L) || (t
.tv_nsec
< 0) || (t
.tv_sec
< 0))
86 expire
= timespec_to_jiffies(&t
) + (t
.tv_sec
|| t
.tv_nsec
);
87 expire
= schedule_timeout_interruptible(expire
);
92 jiffies_to_timespec(expire
, &t
);
93 if (put_compat_timespec(&t
, rmtp
))
96 restart
= ¤t_thread_info()->restart_block
;
97 restart
->fn
= compat_nanosleep_restart
;
98 restart
->arg0
= jiffies
+ expire
;
99 restart
->arg1
= (unsigned long) rmtp
;
100 return -ERESTART_RESTARTBLOCK
;
103 static inline long get_compat_itimerval(struct itimerval
*o
,
104 struct compat_itimerval __user
*i
)
106 return (!access_ok(VERIFY_READ
, i
, sizeof(*i
)) ||
107 (__get_user(o
->it_interval
.tv_sec
, &i
->it_interval
.tv_sec
) |
108 __get_user(o
->it_interval
.tv_usec
, &i
->it_interval
.tv_usec
) |
109 __get_user(o
->it_value
.tv_sec
, &i
->it_value
.tv_sec
) |
110 __get_user(o
->it_value
.tv_usec
, &i
->it_value
.tv_usec
)));
113 static inline long put_compat_itimerval(struct compat_itimerval __user
*o
,
116 return (!access_ok(VERIFY_WRITE
, o
, sizeof(*o
)) ||
117 (__put_user(i
->it_interval
.tv_sec
, &o
->it_interval
.tv_sec
) |
118 __put_user(i
->it_interval
.tv_usec
, &o
->it_interval
.tv_usec
) |
119 __put_user(i
->it_value
.tv_sec
, &o
->it_value
.tv_sec
) |
120 __put_user(i
->it_value
.tv_usec
, &o
->it_value
.tv_usec
)));
123 asmlinkage
long compat_sys_getitimer(int which
,
124 struct compat_itimerval __user
*it
)
126 struct itimerval kit
;
129 error
= do_getitimer(which
, &kit
);
130 if (!error
&& put_compat_itimerval(it
, &kit
))
135 asmlinkage
long compat_sys_setitimer(int which
,
136 struct compat_itimerval __user
*in
,
137 struct compat_itimerval __user
*out
)
139 struct itimerval kin
, kout
;
143 if (get_compat_itimerval(&kin
, in
))
146 memset(&kin
, 0, sizeof(kin
));
148 error
= do_setitimer(which
, &kin
, out
? &kout
: NULL
);
151 if (put_compat_itimerval(out
, &kout
))
156 asmlinkage
long compat_sys_times(struct compat_tms __user
*tbuf
)
159 * In the SMP world we might just be unlucky and have one of
160 * the times increment as we use it. Since the value is an
161 * atomically safe type this is just fine. Conceptually its
162 * as if the syscall took an instant longer to occur.
165 struct compat_tms tmp
;
166 struct task_struct
*tsk
= current
;
167 struct task_struct
*t
;
168 cputime_t utime
, stime
, cutime
, cstime
;
170 read_lock(&tasklist_lock
);
171 utime
= tsk
->signal
->utime
;
172 stime
= tsk
->signal
->stime
;
175 utime
= cputime_add(utime
, t
->utime
);
176 stime
= cputime_add(stime
, t
->stime
);
181 * While we have tasklist_lock read-locked, no dying thread
182 * can be updating current->signal->[us]time. Instead,
183 * we got their counts included in the live thread loop.
184 * However, another thread can come in right now and
185 * do a wait call that updates current->signal->c[us]time.
186 * To make sure we always see that pair updated atomically,
187 * we take the siglock around fetching them.
189 spin_lock_irq(&tsk
->sighand
->siglock
);
190 cutime
= tsk
->signal
->cutime
;
191 cstime
= tsk
->signal
->cstime
;
192 spin_unlock_irq(&tsk
->sighand
->siglock
);
193 read_unlock(&tasklist_lock
);
195 tmp
.tms_utime
= compat_jiffies_to_clock_t(cputime_to_jiffies(utime
));
196 tmp
.tms_stime
= compat_jiffies_to_clock_t(cputime_to_jiffies(stime
));
197 tmp
.tms_cutime
= compat_jiffies_to_clock_t(cputime_to_jiffies(cutime
));
198 tmp
.tms_cstime
= compat_jiffies_to_clock_t(cputime_to_jiffies(cstime
));
199 if (copy_to_user(tbuf
, &tmp
, sizeof(tmp
)))
202 return compat_jiffies_to_clock_t(jiffies
);
206 * Assumption: old_sigset_t and compat_old_sigset_t are both
207 * types that can be passed to put_user()/get_user().
210 asmlinkage
long compat_sys_sigpending(compat_old_sigset_t __user
*set
)
214 mm_segment_t old_fs
= get_fs();
217 ret
= sys_sigpending((old_sigset_t __user
*) &s
);
220 ret
= put_user(s
, set
);
224 asmlinkage
long compat_sys_sigprocmask(int how
, compat_old_sigset_t __user
*set
,
225 compat_old_sigset_t __user
*oset
)
231 if (set
&& get_user(s
, set
))
235 ret
= sys_sigprocmask(how
,
236 set
? (old_sigset_t __user
*) &s
: NULL
,
237 oset
? (old_sigset_t __user
*) &s
: NULL
);
241 ret
= put_user(s
, oset
);
245 asmlinkage
long compat_sys_setrlimit(unsigned int resource
,
246 struct compat_rlimit __user
*rlim
)
250 mm_segment_t old_fs
= get_fs ();
252 if (resource
>= RLIM_NLIMITS
)
255 if (!access_ok(VERIFY_READ
, rlim
, sizeof(*rlim
)) ||
256 __get_user(r
.rlim_cur
, &rlim
->rlim_cur
) ||
257 __get_user(r
.rlim_max
, &rlim
->rlim_max
))
260 if (r
.rlim_cur
== COMPAT_RLIM_INFINITY
)
261 r
.rlim_cur
= RLIM_INFINITY
;
262 if (r
.rlim_max
== COMPAT_RLIM_INFINITY
)
263 r
.rlim_max
= RLIM_INFINITY
;
265 ret
= sys_setrlimit(resource
, (struct rlimit __user
*) &r
);
270 #ifdef COMPAT_RLIM_OLD_INFINITY
272 asmlinkage
long compat_sys_old_getrlimit(unsigned int resource
,
273 struct compat_rlimit __user
*rlim
)
277 mm_segment_t old_fs
= get_fs();
280 ret
= sys_old_getrlimit(resource
, &r
);
284 if (r
.rlim_cur
> COMPAT_RLIM_OLD_INFINITY
)
285 r
.rlim_cur
= COMPAT_RLIM_INFINITY
;
286 if (r
.rlim_max
> COMPAT_RLIM_OLD_INFINITY
)
287 r
.rlim_max
= COMPAT_RLIM_INFINITY
;
289 if (!access_ok(VERIFY_WRITE
, rlim
, sizeof(*rlim
)) ||
290 __put_user(r
.rlim_cur
, &rlim
->rlim_cur
) ||
291 __put_user(r
.rlim_max
, &rlim
->rlim_max
))
299 asmlinkage
long compat_sys_getrlimit (unsigned int resource
,
300 struct compat_rlimit __user
*rlim
)
304 mm_segment_t old_fs
= get_fs();
307 ret
= sys_getrlimit(resource
, (struct rlimit __user
*) &r
);
310 if (r
.rlim_cur
> COMPAT_RLIM_INFINITY
)
311 r
.rlim_cur
= COMPAT_RLIM_INFINITY
;
312 if (r
.rlim_max
> COMPAT_RLIM_INFINITY
)
313 r
.rlim_max
= COMPAT_RLIM_INFINITY
;
315 if (!access_ok(VERIFY_WRITE
, rlim
, sizeof(*rlim
)) ||
316 __put_user(r
.rlim_cur
, &rlim
->rlim_cur
) ||
317 __put_user(r
.rlim_max
, &rlim
->rlim_max
))
323 int put_compat_rusage(const struct rusage
*r
, struct compat_rusage __user
*ru
)
325 if (!access_ok(VERIFY_WRITE
, ru
, sizeof(*ru
)) ||
326 __put_user(r
->ru_utime
.tv_sec
, &ru
->ru_utime
.tv_sec
) ||
327 __put_user(r
->ru_utime
.tv_usec
, &ru
->ru_utime
.tv_usec
) ||
328 __put_user(r
->ru_stime
.tv_sec
, &ru
->ru_stime
.tv_sec
) ||
329 __put_user(r
->ru_stime
.tv_usec
, &ru
->ru_stime
.tv_usec
) ||
330 __put_user(r
->ru_maxrss
, &ru
->ru_maxrss
) ||
331 __put_user(r
->ru_ixrss
, &ru
->ru_ixrss
) ||
332 __put_user(r
->ru_idrss
, &ru
->ru_idrss
) ||
333 __put_user(r
->ru_isrss
, &ru
->ru_isrss
) ||
334 __put_user(r
->ru_minflt
, &ru
->ru_minflt
) ||
335 __put_user(r
->ru_majflt
, &ru
->ru_majflt
) ||
336 __put_user(r
->ru_nswap
, &ru
->ru_nswap
) ||
337 __put_user(r
->ru_inblock
, &ru
->ru_inblock
) ||
338 __put_user(r
->ru_oublock
, &ru
->ru_oublock
) ||
339 __put_user(r
->ru_msgsnd
, &ru
->ru_msgsnd
) ||
340 __put_user(r
->ru_msgrcv
, &ru
->ru_msgrcv
) ||
341 __put_user(r
->ru_nsignals
, &ru
->ru_nsignals
) ||
342 __put_user(r
->ru_nvcsw
, &ru
->ru_nvcsw
) ||
343 __put_user(r
->ru_nivcsw
, &ru
->ru_nivcsw
))
348 asmlinkage
long compat_sys_getrusage(int who
, struct compat_rusage __user
*ru
)
352 mm_segment_t old_fs
= get_fs();
355 ret
= sys_getrusage(who
, (struct rusage __user
*) &r
);
361 if (put_compat_rusage(&r
, ru
))
368 compat_sys_wait4(compat_pid_t pid
, compat_uint_t __user
*stat_addr
, int options
,
369 struct compat_rusage __user
*ru
)
372 return sys_wait4(pid
, stat_addr
, options
, NULL
);
377 mm_segment_t old_fs
= get_fs();
382 (unsigned int __user
*) &status
: NULL
),
383 options
, (struct rusage __user
*) &r
);
387 if (put_compat_rusage(&r
, ru
))
389 if (stat_addr
&& put_user(status
, stat_addr
))
396 asmlinkage
long compat_sys_waitid(int which
, compat_pid_t pid
,
397 struct compat_siginfo __user
*uinfo
, int options
,
398 struct compat_rusage __user
*uru
)
403 mm_segment_t old_fs
= get_fs();
405 memset(&info
, 0, sizeof(info
));
408 ret
= sys_waitid(which
, pid
, (siginfo_t __user
*)&info
, options
,
409 uru
? (struct rusage __user
*)&ru
: NULL
);
412 if ((ret
< 0) || (info
.si_signo
== 0))
416 ret
= put_compat_rusage(&ru
, uru
);
421 BUG_ON(info
.si_code
& __SI_MASK
);
422 info
.si_code
|= __SI_CHLD
;
423 return copy_siginfo_to_user32(uinfo
, &info
);
426 static int compat_get_user_cpu_mask(compat_ulong_t __user
*user_mask_ptr
,
427 unsigned len
, cpumask_t
*new_mask
)
431 if (len
< sizeof(cpumask_t
))
432 memset(new_mask
, 0, sizeof(cpumask_t
));
433 else if (len
> sizeof(cpumask_t
))
434 len
= sizeof(cpumask_t
);
436 k
= cpus_addr(*new_mask
);
437 return compat_get_bitmap(k
, user_mask_ptr
, len
* 8);
440 asmlinkage
long compat_sys_sched_setaffinity(compat_pid_t pid
,
442 compat_ulong_t __user
*user_mask_ptr
)
447 retval
= compat_get_user_cpu_mask(user_mask_ptr
, len
, &new_mask
);
451 return sched_setaffinity(pid
, new_mask
);
454 asmlinkage
long compat_sys_sched_getaffinity(compat_pid_t pid
, unsigned int len
,
455 compat_ulong_t __user
*user_mask_ptr
)
460 unsigned int min_length
= sizeof(cpumask_t
);
462 if (NR_CPUS
<= BITS_PER_COMPAT_LONG
)
463 min_length
= sizeof(compat_ulong_t
);
465 if (len
< min_length
)
468 ret
= sched_getaffinity(pid
, &mask
);
473 ret
= compat_put_bitmap(user_mask_ptr
, k
, min_length
* 8);
480 static int get_compat_itimerspec(struct itimerspec
*dst
,
481 struct compat_itimerspec __user
*src
)
483 if (get_compat_timespec(&dst
->it_interval
, &src
->it_interval
) ||
484 get_compat_timespec(&dst
->it_value
, &src
->it_value
))
489 static int put_compat_itimerspec(struct compat_itimerspec __user
*dst
,
490 struct itimerspec
*src
)
492 if (put_compat_timespec(&src
->it_interval
, &dst
->it_interval
) ||
493 put_compat_timespec(&src
->it_value
, &dst
->it_value
))
498 long compat_sys_timer_create(clockid_t which_clock
,
499 struct compat_sigevent __user
*timer_event_spec
,
500 timer_t __user
*created_timer_id
)
502 struct sigevent __user
*event
= NULL
;
504 if (timer_event_spec
) {
505 struct sigevent kevent
;
507 event
= compat_alloc_user_space(sizeof(*event
));
508 if (get_compat_sigevent(&kevent
, timer_event_spec
) ||
509 copy_to_user(event
, &kevent
, sizeof(*event
)))
513 return sys_timer_create(which_clock
, event
, created_timer_id
);
516 long compat_sys_timer_settime(timer_t timer_id
, int flags
,
517 struct compat_itimerspec __user
*new,
518 struct compat_itimerspec __user
*old
)
522 struct itimerspec newts
, oldts
;
526 if (get_compat_itimerspec(&newts
, new))
530 err
= sys_timer_settime(timer_id
, flags
,
531 (struct itimerspec __user
*) &newts
,
532 (struct itimerspec __user
*) &oldts
);
534 if (!err
&& old
&& put_compat_itimerspec(old
, &oldts
))
539 long compat_sys_timer_gettime(timer_t timer_id
,
540 struct compat_itimerspec __user
*setting
)
544 struct itimerspec ts
;
548 err
= sys_timer_gettime(timer_id
,
549 (struct itimerspec __user
*) &ts
);
551 if (!err
&& put_compat_itimerspec(setting
, &ts
))
556 long compat_sys_clock_settime(clockid_t which_clock
,
557 struct compat_timespec __user
*tp
)
563 if (get_compat_timespec(&ts
, tp
))
567 err
= sys_clock_settime(which_clock
,
568 (struct timespec __user
*) &ts
);
573 long compat_sys_clock_gettime(clockid_t which_clock
,
574 struct compat_timespec __user
*tp
)
582 err
= sys_clock_gettime(which_clock
,
583 (struct timespec __user
*) &ts
);
585 if (!err
&& put_compat_timespec(&ts
, tp
))
590 long compat_sys_clock_getres(clockid_t which_clock
,
591 struct compat_timespec __user
*tp
)
599 err
= sys_clock_getres(which_clock
,
600 (struct timespec __user
*) &ts
);
602 if (!err
&& tp
&& put_compat_timespec(&ts
, tp
))
607 static long compat_clock_nanosleep_restart(struct restart_block
*restart
)
612 struct compat_timespec
*rmtp
= (struct compat_timespec
*)(restart
->arg1
);
614 restart
->arg1
= (unsigned long) &tu
;
617 err
= clock_nanosleep_restart(restart
);
620 if ((err
== -ERESTART_RESTARTBLOCK
) && rmtp
&&
621 put_compat_timespec(&tu
, rmtp
))
624 if (err
== -ERESTART_RESTARTBLOCK
) {
625 restart
->fn
= compat_clock_nanosleep_restart
;
626 restart
->arg1
= (unsigned long) rmtp
;
631 long compat_sys_clock_nanosleep(clockid_t which_clock
, int flags
,
632 struct compat_timespec __user
*rqtp
,
633 struct compat_timespec __user
*rmtp
)
637 struct timespec in
, out
;
638 struct restart_block
*restart
;
640 if (get_compat_timespec(&in
, rqtp
))
645 err
= sys_clock_nanosleep(which_clock
, flags
,
646 (struct timespec __user
*) &in
,
647 (struct timespec __user
*) &out
);
650 if ((err
== -ERESTART_RESTARTBLOCK
) && rmtp
&&
651 put_compat_timespec(&out
, rmtp
))
654 if (err
== -ERESTART_RESTARTBLOCK
) {
655 restart
= ¤t_thread_info()->restart_block
;
656 restart
->fn
= compat_clock_nanosleep_restart
;
657 restart
->arg1
= (unsigned long) rmtp
;
663 * We currently only need the following fields from the sigevent
664 * structure: sigev_value, sigev_signo, sig_notify and (sometimes
665 * sigev_notify_thread_id). The others are handled in user mode.
666 * We also assume that copying sigev_value.sival_int is sufficient
667 * to keep all the bits of sigev_value.sival_ptr intact.
669 int get_compat_sigevent(struct sigevent
*event
,
670 const struct compat_sigevent __user
*u_event
)
672 memset(event
, 0, sizeof(*event
));
673 return (!access_ok(VERIFY_READ
, u_event
, sizeof(*u_event
)) ||
674 __get_user(event
->sigev_value
.sival_int
,
675 &u_event
->sigev_value
.sival_int
) ||
676 __get_user(event
->sigev_signo
, &u_event
->sigev_signo
) ||
677 __get_user(event
->sigev_notify
, &u_event
->sigev_notify
) ||
678 __get_user(event
->sigev_notify_thread_id
,
679 &u_event
->sigev_notify_thread_id
))
683 long compat_get_bitmap(unsigned long *mask
, compat_ulong_t __user
*umask
,
684 unsigned long bitmap_size
)
689 unsigned long nr_compat_longs
;
691 /* align bitmap up to nearest compat_long_t boundary */
692 bitmap_size
= ALIGN(bitmap_size
, BITS_PER_COMPAT_LONG
);
694 if (!access_ok(VERIFY_READ
, umask
, bitmap_size
/ 8))
697 nr_compat_longs
= BITS_TO_COMPAT_LONGS(bitmap_size
);
699 for (i
= 0; i
< BITS_TO_LONGS(bitmap_size
); i
++) {
702 for (j
= 0; j
< sizeof(m
)/sizeof(um
); j
++) {
704 * We dont want to read past the end of the userspace
705 * bitmap. We must however ensure the end of the
706 * kernel bitmap is zeroed.
708 if (nr_compat_longs
-- > 0) {
709 if (__get_user(um
, umask
))
716 m
|= (long)um
<< (j
* BITS_PER_COMPAT_LONG
);
724 long compat_put_bitmap(compat_ulong_t __user
*umask
, unsigned long *mask
,
725 unsigned long bitmap_size
)
730 unsigned long nr_compat_longs
;
732 /* align bitmap up to nearest compat_long_t boundary */
733 bitmap_size
= ALIGN(bitmap_size
, BITS_PER_COMPAT_LONG
);
735 if (!access_ok(VERIFY_WRITE
, umask
, bitmap_size
/ 8))
738 nr_compat_longs
= BITS_TO_COMPAT_LONGS(bitmap_size
);
740 for (i
= 0; i
< BITS_TO_LONGS(bitmap_size
); i
++) {
743 for (j
= 0; j
< sizeof(m
)/sizeof(um
); j
++) {
747 * We dont want to write past the end of the userspace
750 if (nr_compat_longs
-- > 0) {
751 if (__put_user(um
, umask
))
765 sigset_from_compat (sigset_t
*set
, compat_sigset_t
*compat
)
767 switch (_NSIG_WORDS
) {
768 case 4: set
->sig
[3] = compat
->sig
[6] | (((long)compat
->sig
[7]) << 32 );
769 case 3: set
->sig
[2] = compat
->sig
[4] | (((long)compat
->sig
[5]) << 32 );
770 case 2: set
->sig
[1] = compat
->sig
[2] | (((long)compat
->sig
[3]) << 32 );
771 case 1: set
->sig
[0] = compat
->sig
[0] | (((long)compat
->sig
[1]) << 32 );
776 compat_sys_rt_sigtimedwait (compat_sigset_t __user
*uthese
,
777 struct compat_siginfo __user
*uinfo
,
778 struct compat_timespec __user
*uts
, compat_size_t sigsetsize
)
785 long ret
, timeout
= 0;
787 if (sigsetsize
!= sizeof(sigset_t
))
790 if (copy_from_user(&s32
, uthese
, sizeof(compat_sigset_t
)))
792 sigset_from_compat(&s
, &s32
);
793 sigdelsetmask(&s
,sigmask(SIGKILL
)|sigmask(SIGSTOP
));
797 if (get_compat_timespec (&t
, uts
))
799 if (t
.tv_nsec
>= 1000000000L || t
.tv_nsec
< 0
804 spin_lock_irq(¤t
->sighand
->siglock
);
805 sig
= dequeue_signal(current
, &s
, &info
);
807 timeout
= MAX_SCHEDULE_TIMEOUT
;
809 timeout
= timespec_to_jiffies(&t
)
810 +(t
.tv_sec
|| t
.tv_nsec
);
812 current
->real_blocked
= current
->blocked
;
813 sigandsets(¤t
->blocked
, ¤t
->blocked
, &s
);
816 spin_unlock_irq(¤t
->sighand
->siglock
);
818 timeout
= schedule_timeout_interruptible(timeout
);
820 spin_lock_irq(¤t
->sighand
->siglock
);
821 sig
= dequeue_signal(current
, &s
, &info
);
822 current
->blocked
= current
->real_blocked
;
823 siginitset(¤t
->real_blocked
, 0);
827 spin_unlock_irq(¤t
->sighand
->siglock
);
832 if (copy_siginfo_to_user32(uinfo
, &info
))
836 ret
= timeout
?-EINTR
:-EAGAIN
;
842 #ifdef __ARCH_WANT_COMPAT_SYS_TIME
844 /* compat_time_t is a 32 bit "long" and needs to get converted. */
846 asmlinkage
long compat_sys_time(compat_time_t __user
* tloc
)
851 do_gettimeofday(&tv
);
855 if (put_user(i
,tloc
))
861 asmlinkage
long compat_sys_stime(compat_time_t __user
*tptr
)
866 if (get_user(tv
.tv_sec
, tptr
))
871 err
= security_settime(&tv
, NULL
);
875 do_settimeofday(&tv
);
879 #endif /* __ARCH_WANT_COMPAT_SYS_TIME */
881 #ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
882 asmlinkage
long compat_sys_rt_sigsuspend(compat_sigset_t __user
*unewset
, compat_size_t sigsetsize
)
885 compat_sigset_t newset32
;
887 /* XXX: Don't preclude handling different sized sigset_t's. */
888 if (sigsetsize
!= sizeof(sigset_t
))
891 if (copy_from_user(&newset32
, unewset
, sizeof(compat_sigset_t
)))
893 sigset_from_compat(&newset
, &newset32
);
894 sigdelsetmask(&newset
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
896 spin_lock_irq(¤t
->sighand
->siglock
);
897 current
->saved_sigmask
= current
->blocked
;
898 current
->blocked
= newset
;
900 spin_unlock_irq(¤t
->sighand
->siglock
);
902 current
->state
= TASK_INTERRUPTIBLE
;
904 set_thread_flag(TIF_RESTORE_SIGMASK
);
905 return -ERESTARTNOHAND
;
907 #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
909 asmlinkage
long compat_sys_adjtimex(struct compat_timex __user
*utp
)
914 memset(&txc
, 0, sizeof(struct timex
));
916 if (!access_ok(VERIFY_READ
, utp
, sizeof(struct compat_timex
)) ||
917 __get_user(txc
.modes
, &utp
->modes
) ||
918 __get_user(txc
.offset
, &utp
->offset
) ||
919 __get_user(txc
.freq
, &utp
->freq
) ||
920 __get_user(txc
.maxerror
, &utp
->maxerror
) ||
921 __get_user(txc
.esterror
, &utp
->esterror
) ||
922 __get_user(txc
.status
, &utp
->status
) ||
923 __get_user(txc
.constant
, &utp
->constant
) ||
924 __get_user(txc
.precision
, &utp
->precision
) ||
925 __get_user(txc
.tolerance
, &utp
->tolerance
) ||
926 __get_user(txc
.time
.tv_sec
, &utp
->time
.tv_sec
) ||
927 __get_user(txc
.time
.tv_usec
, &utp
->time
.tv_usec
) ||
928 __get_user(txc
.tick
, &utp
->tick
) ||
929 __get_user(txc
.ppsfreq
, &utp
->ppsfreq
) ||
930 __get_user(txc
.jitter
, &utp
->jitter
) ||
931 __get_user(txc
.shift
, &utp
->shift
) ||
932 __get_user(txc
.stabil
, &utp
->stabil
) ||
933 __get_user(txc
.jitcnt
, &utp
->jitcnt
) ||
934 __get_user(txc
.calcnt
, &utp
->calcnt
) ||
935 __get_user(txc
.errcnt
, &utp
->errcnt
) ||
936 __get_user(txc
.stbcnt
, &utp
->stbcnt
))
939 ret
= do_adjtimex(&txc
);
941 if (!access_ok(VERIFY_WRITE
, utp
, sizeof(struct compat_timex
)) ||
942 __put_user(txc
.modes
, &utp
->modes
) ||
943 __put_user(txc
.offset
, &utp
->offset
) ||
944 __put_user(txc
.freq
, &utp
->freq
) ||
945 __put_user(txc
.maxerror
, &utp
->maxerror
) ||
946 __put_user(txc
.esterror
, &utp
->esterror
) ||
947 __put_user(txc
.status
, &utp
->status
) ||
948 __put_user(txc
.constant
, &utp
->constant
) ||
949 __put_user(txc
.precision
, &utp
->precision
) ||
950 __put_user(txc
.tolerance
, &utp
->tolerance
) ||
951 __put_user(txc
.time
.tv_sec
, &utp
->time
.tv_sec
) ||
952 __put_user(txc
.time
.tv_usec
, &utp
->time
.tv_usec
) ||
953 __put_user(txc
.tick
, &utp
->tick
) ||
954 __put_user(txc
.ppsfreq
, &utp
->ppsfreq
) ||
955 __put_user(txc
.jitter
, &utp
->jitter
) ||
956 __put_user(txc
.shift
, &utp
->shift
) ||
957 __put_user(txc
.stabil
, &utp
->stabil
) ||
958 __put_user(txc
.jitcnt
, &utp
->jitcnt
) ||
959 __put_user(txc
.calcnt
, &utp
->calcnt
) ||
960 __put_user(txc
.errcnt
, &utp
->errcnt
) ||
961 __put_user(txc
.stbcnt
, &utp
->stbcnt
))
968 asmlinkage
long compat_sys_move_pages(pid_t pid
, unsigned long nr_pages
,
969 compat_uptr_t __user
*pages32
,
970 const int __user
*nodes
,
974 const void __user
* __user
*pages
;
977 pages
= compat_alloc_user_space(nr_pages
* sizeof(void *));
978 for (i
= 0; i
< nr_pages
; i
++) {
981 if (get_user(p
, pages32
+ i
) ||
982 put_user(compat_ptr(p
), pages
+ i
))
985 return sys_move_pages(pid
, nr_pages
, pages
, nodes
, status
, flags
);