/*
 * linux/kernel/posix_timers.c
 *
 * 2002-10-15  Posix Clocks & timers
 *                           by George Anzinger george@mvista.com
 *                           Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *                           Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */
/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/calc64.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/idr.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#define CLOCK_REALTIME_RES	TICK_NSEC	/* In nanoseconds. */
static inline u64 mpy_l_X_l_ll(unsigned long mpy1, unsigned long mpy2)
{
	return (u64) mpy1 * mpy2;
}
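/*
 * Illustrative note (not in the original source): the cast is what
 * forces a 64-bit multiply.  Without it, "mpy1 * mpy2" on a 32-bit
 * machine is computed in 32 bits and the high bits are lost; e.g.
 * with mpy1 = 5000 seconds and mpy2 = SEC_CONVERSION (a large scaling
 * constant from <linux/time.h>) the product easily exceeds 2^32.
 * Casting one operand to u64 promotes the whole expression, so the
 * full 64-bit product is kept.
 */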
/*
 * Management arrays for POSIX timers.  Timers are kept in slab memory.
 * Timer ids are allocated by an external routine that keeps track of the
 * id and the timer.  The external interface is:
 *
 * void *idr_find(struct idr *idp, int id);           to find timer_id <id>
 * int idr_get_new(struct idr *idp, void *ptr);       to get a new id and
 *                                                    relate it to <ptr>.
 * void idr_remove(struct idr *idp, int id);          to release <id>
 * void idr_init(struct idr *idp);                    to initialize <idp>
 *
 * The idr_get_new *may* call slab for more memory so it must not be
 * called under a spin lock.  Likewise idr_remove may release memory
 * (but it may be ok to do this under a lock...).
 * idr_find is just a memory lookup and is quite fast.  A -1 return
 * indicates that the requested id does not exist.
 */
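/*
 * Illustrative sketch only (it mirrors the allocation pattern used
 * later in sys_timer_create(); the names are the real ones from this
 * file):
 *
 *	if (idr_pre_get(&posix_timers_id, GFP_KERNEL)) {
 *		spin_lock_irq(&idr_lock);
 *		error = idr_get_new(&posix_timers_id,
 *				    (void *) new_timer, &new_timer_id);
 *		spin_unlock_irq(&idr_lock);
 *	}
 *
 * idr_pre_get() does any slab allocation up front, outside the lock,
 * so that idr_get_new() never has to ask slab for memory while the
 * spinlock is held.
 */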
/*
 * Let's keep our timers in a slab cache :-)
 */
static kmem_cache_t *posix_timers_cache;
static struct idr posix_timers_id;
static DEFINE_SPINLOCK(idr_lock);
/*
 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values.  Here we put out an error if this assumption fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
/*
 * The timer ID is turned into a timer address by idr_find().
 * Verifying a valid ID consists of:
 *
 * a) checking that idr_find() returns other than -1.
 * b) checking that the timer id matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */
/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *	    to implement others.  This structure defines the various
 *	    clocks and allows the possibility of adding others.  We
 *	    provide an interface to add clocks to the table and expect
 *	    the "arch" code to add at least one clock that is high
 *	    resolution.  Here we define the standard CLOCK_REALTIME as a
 *	    1/HZ resolution clock.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *	    times, NOT to report clock times, which are reported with as
 *	    much resolution as the system can muster.  In some cases this
 *	    resolution may depend on the underlying clock hardware and
 *	    may not be quantifiable until run time, and only then is the
 *	    necessary code written.  The standard says we should say
 *	    something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to handle
 *	    various clock functions.  For clocks that use the standard
 *	    system timer code these entries should be NULL.  This will
 *	    allow dispatch without the overhead of indirect function
 *	    calls.  CLOCKS that depend on other sources (e.g. WWV or GPS)
 *	    must supply functions here, even if the function just returns
 *	    ENOSYS.  The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for the
 *	    timer.  2.) The list, it_lock, it_clock, it_id and it_process
 *	    fields are not modified by timer code.
 *
 *	    At this time all functions EXCEPT clock_nanosleep can be
 *	    redirected by the CLOCKS structure.  Clock_nanosleep is in
 *	    there, but the code ignores it.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *	    for each clock will take care of permission checks.  Some
 *	    clocks may be settable by any user (i.e. local process
 *	    clocks) others not.  Currently the only settable clock we
 *	    have is CLOCK_REALTIME and its high res counterpart, both of
 *	    which we beg off on and pass to do_sys_settimeofday().
 */
static struct k_clock posix_clocks[MAX_CLOCKS];
/*
 * We only have one real clock that can be set so we need only one abs list,
 * even if we should want to have several clocks with differing resolutions.
 */
static struct k_clock_abs abs_list = {.list = LIST_HEAD_INIT(abs_list.list),
				      .lock = SPIN_LOCK_UNLOCKED};
static void posix_timer_fn(unsigned long);
static u64 do_posix_clock_monotonic_gettime_parts(
	struct timespec *tp, struct timespec *mo);
int do_posix_clock_monotonic_gettime(struct timespec *tp);
static int do_posix_clock_monotonic_get(clockid_t, struct timespec *tp);

static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
/*
 * Call the k_clock hook function if non-null, or the default function.
 */
#define CLOCK_DISPATCH(clock, call, arglist) \
	((clock) < 0 ? posix_cpu_##call arglist : \
	 (posix_clocks[clock].call != NULL \
	  ? (*posix_clocks[clock].call) arglist : common_##call arglist))
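/*
 * For illustration only: CLOCK_DISPATCH(which_clock, clock_get,
 * (which_clock, &kernel_tp)) expands to
 *
 *	(which_clock < 0 ? posix_cpu_clock_get (which_clock, &kernel_tp) :
 *	 (posix_clocks[which_clock].clock_get != NULL
 *	  ? (*posix_clocks[which_clock].clock_get) (which_clock, &kernel_tp)
 *	  : common_clock_get (which_clock, &kernel_tp)))
 *
 * so negative ids go to the CPU-clock code, clocks that install a hook
 * get an indirect call, and everything else falls through to the
 * common_* default with no function-pointer overhead.
 */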
/*
 * Default clock hook functions when the struct k_clock passed
 * to register_posix_clock leaves a function pointer null.
 *
 * The function common_CALL is the default implementation for
 * the function pointer CALL in struct k_clock.
 */

static inline int common_clock_getres(clockid_t which_clock,
				      struct timespec *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = posix_clocks[which_clock].res;
	return 0;
}
static inline int common_clock_get(clockid_t which_clock, struct timespec *tp)
{
	getnstimeofday(tp);
	return 0;
}
static inline int common_clock_set(clockid_t which_clock, struct timespec *tp)
{
	return do_sys_settimeofday(tp, NULL);
}
static inline int common_timer_create(struct k_itimer *new_timer)
{
	INIT_LIST_HEAD(&new_timer->it.real.abs_timer_entry);
	init_timer(&new_timer->it.real.timer);
	new_timer->it.real.timer.data = (unsigned long) new_timer;
	new_timer->it.real.timer.function = posix_timer_fn;
	return 0;
}
/*
 * These ones are defined below.
 */
static int common_nsleep(clockid_t, int flags, struct timespec *t);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
			    struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);
/*
 * Return nonzero iff we know a priori this clockid_t value is bogus.
 */
static inline int invalid_clockid(clockid_t which_clock)
{
	if (which_clock < 0)	/* CPU clock, posix_cpu_* will check it */
		return 0;
	if ((unsigned) which_clock >= MAX_CLOCKS)
		return 1;
	if (posix_clocks[which_clock].clock_getres != NULL)
		return 0;
#ifndef CLOCK_DISPATCH_DIRECT
	if (posix_clocks[which_clock].res != 0)
		return 0;
#endif
	return 1;
}
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {.res = CLOCK_REALTIME_RES,
					 .abs_struct = &abs_list
	};
	struct k_clock clock_monotonic = {.res = CLOCK_REALTIME_RES,
		.abs_struct = NULL,
		.clock_get = do_posix_clock_monotonic_get,
		.clock_set = do_posix_clock_nosettime
	};

	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);

	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					       sizeof (struct k_itimer), 0, 0,
					       NULL, NULL);
	idr_init(&posix_timers_id);
	return 0;
}

__initcall(init_posix_timers);
static void tstojiffie(struct timespec *tp, int res, u64 *jiff)
{
	long sec = tp->tv_sec;
	long nsec = tp->tv_nsec + res - 1;

	if (nsec >= NSEC_PER_SEC) {
		sec++;
		nsec -= NSEC_PER_SEC;
	}

	/*
	 * The scaling constants are defined in <linux/time.h>
	 * The difference between there and here is that we do the
	 * res rounding and compute a 64-bit result (well so does that
	 * but it then throws away the high bits).
	 */
	*jiff = (mpy_l_X_l_ll(sec, SEC_CONVERSION) +
		 (mpy_l_X_l_ll(nsec, NSEC_CONVERSION) >>
		  (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
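/*
 * Worked example (illustrative, assuming HZ=1000 so res = TICK_NSEC =
 * 1000000): a request of tv_sec = 0, tv_nsec = 1 becomes
 * nsec = 1 + 1000000 - 1 = 1000000, i.e. exactly one tick's worth of
 * nanoseconds, and the multiply/shift then yields one jiffy.  The
 * "+ res - 1" rounding thus guarantees a timer is never shorter than
 * the clock's resolution.
 */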
/*
 * This function adjusts the timer as needed as a result of the clock
 * being set.  It should only be called for absolute timers, and then
 * under the abs_list lock.  It computes the time difference and sets
 * the new jiffies value in the timer.  It also updates the timer's
 * reference wall_to_monotonic value.  It is complicated by the fact
 * that tstojiffies() only handles positive times and it needs to work
 * with both positive and negative times.  Also, for negative offsets,
 * we need to defeat the res round up.
 *
 * Return is true if there is a new time, else false.
 */
static long add_clockset_delta(struct k_itimer *timr,
			       struct timespec *new_wall_to)
{
	struct timespec delta;
	int sign = 0;
	u64 exp;

	set_normalized_timespec(&delta,
				new_wall_to->tv_sec -
				timr->it.real.wall_to_prev.tv_sec,
				new_wall_to->tv_nsec -
				timr->it.real.wall_to_prev.tv_nsec);
	if (likely(!(delta.tv_sec | delta.tv_nsec)))
		return 0;
	if (delta.tv_sec < 0) {
		set_normalized_timespec(&delta,
					-delta.tv_sec,
					1 - delta.tv_nsec -
					posix_clocks[timr->it_clock].res);
		sign++;
	}
	tstojiffie(&delta, posix_clocks[timr->it_clock].res, &exp);
	timr->it.real.wall_to_prev = *new_wall_to;
	timr->it.real.timer.expires += (sign ? -exp : exp);
	return 1;
}
static void remove_from_abslist(struct k_itimer *timr)
{
	if (!list_empty(&timr->it.real.abs_timer_entry)) {
		spin_lock(&abs_list.lock);
		list_del_init(&timr->it.real.abs_timer_entry);
		spin_unlock(&abs_list.lock);
	}
}
static void schedule_next_timer(struct k_itimer *timr)
{
	struct timespec new_wall_to;
	struct now_struct now;
	unsigned long seq;

	/*
	 * Set up the timer for the next interval (if there is one).
	 * Note: this code uses the abs_timer_lock to protect
	 * it.real.wall_to_prev and must hold it until exp is set, not exactly
	 * obvious...
	 *
	 * This function is used for CLOCK_REALTIME* and
	 * CLOCK_MONOTONIC* timers.  If we ever want to handle other
	 * CLOCKs, the calling code (do_schedule_next_timer) would need
	 * to pull the "clock" info from the timer and dispatch the
	 * "other" CLOCKs "next timer" code (which, I suppose should
	 * also be added to the k_clock structure).
	 */
	if (!timr->it.real.incr)
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		new_wall_to = wall_to_monotonic;
		posix_get_now(&now);
	} while (read_seqretry(&xtime_lock, seq));

	if (!list_empty(&timr->it.real.abs_timer_entry)) {
		spin_lock(&abs_list.lock);
		add_clockset_delta(timr, &new_wall_to);

		posix_bump_timer(timr, now);

		spin_unlock(&abs_list.lock);
	} else {
		posix_bump_timer(timr, now);
	}
	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	add_timer(&timr->it.real.timer);
}
/*
 * This function is exported for use by the signal deliver code.  It is
 * called just prior to the info block being released and passes that
 * block to us.  Its function is to update the overrun entry AND to
 * restart the timer.  It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void do_schedule_next_timer(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);

	if (!timr || timr->it_requeue_pending != info->si_sys_private)
		goto exit;

	if (timr->it_clock < 0)	/* CPU clock */
		posix_cpu_timer_schedule(timr);
	else
		schedule_next_timer(timr);
	info->si_overrun = timr->it_overrun_last;
exit:
	if (timr)
		unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	memset(&timr->sigq->info, 0, sizeof(siginfo_t));
	timr->sigq->info.si_sys_private = si_private;
	/*
	 * Send signal to the process that owns this timer.
	 *
	 * This code assumes that all the possible abs_lists share the
	 * same lock (there is only one list at this time). If this is
	 * not the case, the CLOCK info would need to be used to find
	 * the proper abs list lock.
	 */

	timr->sigq->info.si_signo = timr->it_sigev_signo;
	timr->sigq->info.si_errno = 0;
	timr->sigq->info.si_code = SI_TIMER;
	timr->sigq->info.si_tid = timr->it_id;
	timr->sigq->info.si_value = timr->it_sigev_value;

	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
		struct task_struct *leader;
		int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
					timr->it_process);

		if (likely(ret >= 0))
			return ret;

		timr->it_sigev_notify = SIGEV_SIGNAL;
		leader = timr->it_process->group_leader;
		put_task_struct(timr->it_process);
		timr->it_process = leader;
	}

	return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
				   timr->it_process);
}
EXPORT_SYMBOL_GPL(posix_timer_event);
/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static void posix_timer_fn(unsigned long __data)
{
	struct k_itimer *timr = (struct k_itimer *) __data;
	unsigned long flags;
	unsigned long seq;
	struct timespec delta, new_wall_to;
	u64 exp = 0;
	int do_notify = 1;

	spin_lock_irqsave(&timr->it_lock, flags);
	if (!list_empty(&timr->it.real.abs_timer_entry)) {
		spin_lock(&abs_list.lock);
		do {
			seq = read_seqbegin(&xtime_lock);
			new_wall_to = wall_to_monotonic;
		} while (read_seqretry(&xtime_lock, seq));
		set_normalized_timespec(&delta,
					new_wall_to.tv_sec -
					timr->it.real.wall_to_prev.tv_sec,
					new_wall_to.tv_nsec -
					timr->it.real.wall_to_prev.tv_nsec);
		if (likely((delta.tv_sec | delta.tv_nsec) == 0)) {
			/* do nothing, timer is on time */
		} else if (delta.tv_sec < 0) {
			/* do nothing, timer is already late */
		} else {
			/* timer is early due to a clock set */
			tstojiffie(&delta,
				   posix_clocks[timr->it_clock].res,
				   &exp);
			timr->it.real.wall_to_prev = new_wall_to;
			timr->it.real.timer.expires += exp;
			add_timer(&timr->it.real.timer);
			do_notify = 0;
		}
		spin_unlock(&abs_list.lock);

	}
	if (do_notify) {
		int si_private = 0;

		if (timr->it.real.incr)
			si_private = ++timr->it_requeue_pending;
		else
			remove_from_abslist(timr);

		if (posix_timer_event(timr, si_private))
			/*
			 * signal was not sent because of sig_ignor
			 * we will not get a call back to restart it AND
			 * it should be restarted.
			 */
			schedule_next_timer(timr);
	}
	unlock_timer(timr, flags); /* hold thru abs lock to keep irq off */
}
static inline struct task_struct *good_sigevent(sigevent_t *event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID) &&
	    (!(rtn = find_task_by_pid(event->sigev_notify_thread_id)) ||
	     rtn->tgid != current->tgid ||
	     (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return rtn;
}
void register_posix_clock(clockid_t clock_id, struct k_clock *new_clock)
{
	if ((unsigned) clock_id >= MAX_CLOCKS) {
		printk("POSIX clock register failed for clock_id %d\n",
		       clock_id);
		return;
	}

	posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(register_posix_clock);
static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr;
	tmr = kmem_cache_alloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;
	memset(tmr, 0, sizeof (struct k_itimer));
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		tmr = NULL;
	}
	return tmr;
}
#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&posix_timers_id, tmr->it_id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}
	sigqueue_free(tmr->sigq);
	if (unlikely(tmr->it_process) &&
	    tmr->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
		put_task_struct(tmr->it_process);
	kmem_cache_free(posix_timers_cache, tmr);
}
/* Create a POSIX.1b interval timer. */

asmlinkage long
sys_timer_create(clockid_t which_clock,
		 struct sigevent __user *timer_event_spec,
		 timer_t __user * created_timer_id)
{
	int error = 0;
	struct k_itimer *new_timer = NULL;
	int new_timer_id;
	struct task_struct *process = NULL;
	unsigned long flags;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
 retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id,
			    (void *) new_timer,
			    &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error) {
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;
	error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
	if (error)
		goto out;

	/*
	 * return the timer_id now.  The next step is hard to
	 * back out if there is an error.
	 */
	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		new_timer->it_sigev_notify = event.sigev_notify;
		new_timer->it_sigev_signo = event.sigev_signo;
		new_timer->it_sigev_value = event.sigev_value;

		read_lock(&tasklist_lock);
		if ((process = good_sigevent(&event))) {
			/*
			 * We may be setting up this process for another
			 * thread.  It may be exiting.  To catch this
			 * case we check the PF_EXITING flag.  If
			 * the flag is not set, the siglock will catch
			 * him before it is too late (in exit_itimers).
			 *
			 * The exec case is a bit more involved but easy
			 * to code.  If the process is in our thread
			 * group (and it must be or we would not allow
			 * it here) and is doing an exec, it will cause
			 * us to be killed.  In this case it will wait
			 * for us to die which means we can finish this
			 * linkage with our last gasp. I.e. no code :)
			 */
			spin_lock_irqsave(&process->sighand->siglock, flags);
			if (!(process->flags & PF_EXITING)) {
				new_timer->it_process = process;
				list_add(&new_timer->list,
					 &process->signal->posix_timers);
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
					get_task_struct(process);
			} else {
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				process = NULL;
			}
		}
		read_unlock(&tasklist_lock);
		if (!process) {
			error = -EINVAL;
			goto out;
		}
	} else {
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->it_sigev_signo = SIGALRM;
		new_timer->it_sigev_value.sival_int = new_timer->it_id;
		process = current->group_leader;
		spin_lock_irqsave(&process->sighand->siglock, flags);
		new_timer->it_process = process;
		list_add(&new_timer->list, &process->signal->posix_timers);
		spin_unlock_irqrestore(&process->sighand->siglock, flags);
	}

	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */

out:
	if (error)
		release_posix_timer(new_timer, it_id_set);

	return error;
}
/*
 * This function checks the elements of a timespec structure.
 *
 * Arguments:
 * ts	     : Pointer to the timespec structure to check
 *
 * Return value:
 * If a NULL pointer was passed in, or the tv_nsec field was less than 0
 * or not less than NSEC_PER_SEC, or the tv_sec field was less than 0,
 * this function returns 0. Otherwise it returns 1.
 */
static int good_timespec(const struct timespec *ts)
{
	if ((!ts) || (ts->tv_sec < 0) ||
	    ((unsigned) ts->tv_nsec >= NSEC_PER_SEC))
		return 0;
	return 1;
}
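/*
 * Illustrative examples (not from the original source):
 *	{ .tv_sec = 1,  .tv_nsec = 500000000  }  -> good (1.5 s)
 *	{ .tv_sec = 0,  .tv_nsec = 1000000000 }  -> bad, tv_nsec must be
 *						    normalized below NSEC_PER_SEC
 *	{ .tv_sec = -1, .tv_nsec = 0          }  -> bad, negative seconds
 *
 * Note the (unsigned) cast: a negative tv_nsec wraps to a huge value,
 * so the single ">= NSEC_PER_SEC" comparison also rejects tv_nsec < 0.
 */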
/*
 * Locking issues: We need to protect the result of the id lookup until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the idr spinlock so we use that here to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;
	/*
	 * Watch out here.  We do an irqsave on the idr_lock and pass the
	 * flags part over to the timer lock.  Must not let interrupts in
	 * while we are moving the lock.
	 */

	spin_lock_irqsave(&idr_lock, *flags);
	timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
	if (timr) {
		spin_lock(&timr->it_lock);
		spin_unlock(&idr_lock);

		if ((timr->it_id != timer_id) || !(timr->it_process) ||
		    timr->it_process->tgid != current->tgid) {
			unlock_timer(timr, *flags);
			timr = NULL;
		}
	} else
		spin_unlock_irqrestore(&idr_lock, *flags);

	return timr;
}
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer WRT to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	unsigned long expires;
	struct now_struct now;

	do
		expires = timr->it.real.timer.expires;
	while ((volatile long) (timr->it.real.timer.expires) != expires);

	posix_get_now(&now);

	if (expires &&
	    ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) &&
	    !timr->it.real.incr &&
	    posix_time_before(&timr->it.real.timer, &now))
		timr->it.real.timer.expires = expires = 0;
	if (expires) {
		if (timr->it_requeue_pending & REQUEUE_PENDING ||
		    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
			posix_bump_timer(timr, now);
			expires = timr->it.real.timer.expires;
		} else if (!timer_pending(&timr->it.real.timer))
			expires = 0;
		if (expires)
			expires -= now.jiffies;
	}
	jiffies_to_timespec(expires, &cur_setting->it_value);
	jiffies_to_timespec(timr->it.real.incr, &cur_setting->it_interval);

	if (cur_setting->it_value.tv_sec < 0) {
		cur_setting->it_value.tv_nsec = 1;
		cur_setting->it_value.tv_sec = 0;
	}
}
/* Get the time remaining on a POSIX.1b interval timer. */
asmlinkage long
sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
{
	struct k_itimer *timr;
	struct itimerspec cur_setting;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));

	unlock_timer(timr, flags);

	if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return 0;
}
/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is to
 * be the overrun of the timer last delivered.  At the same time we are
 * accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the call back to do_schedule_next_timer()).  So all we need to do is
 * to pick up the frozen overrun.
 */
asmlinkage long
sys_timer_getoverrun(timer_t timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}
/*
 * Adjust for absolute time
 *
 * If absolute time is given and it is not CLOCK_MONOTONIC, we need to
 * adjust for the offset between the timer clock (CLOCK_MONOTONIC) and
 * whatever clock the caller is using.
 *
 * If it is relative time, we need to add the current (CLOCK_MONOTONIC)
 * time to it to get the proper time for the timer.
 */
static int adjust_abs_time(struct k_clock *clock, struct timespec *tp,
			   int abs, u64 *exp, struct timespec *wall_to)
{
	struct timespec now;
	struct timespec oc = *tp;
	u64 jiffies_64_f;
	int rtn = 0;

	if (abs) {
		/*
		 * The mask picks up the 4 basic clocks
		 */
		if (!((clock - &posix_clocks[0]) & ~CLOCKS_MASK)) {
			jiffies_64_f = do_posix_clock_monotonic_gettime_parts(
				&now, wall_to);
			/*
			 * If we are doing a MONOTONIC clock
			 */
			if ((clock - &posix_clocks[0]) & CLOCKS_MONO) {
				now.tv_sec += wall_to->tv_sec;
				now.tv_nsec += wall_to->tv_nsec;
			}
		} else {
			/*
			 * Not one of the basic clocks
			 */
			clock->clock_get(clock - posix_clocks, &now);
			jiffies_64_f = get_jiffies_64();
		}
		/*
		 * Take away now to get delta and normalize
		 */
		set_normalized_timespec(&oc, oc.tv_sec - now.tv_sec,
					oc.tv_nsec - now.tv_nsec);
	} else {
		jiffies_64_f = get_jiffies_64();
	}
	/*
	 * Check if the requested time is prior to now (if so set now)
	 */
	if (oc.tv_sec < 0)
		oc.tv_sec = oc.tv_nsec = 0;

	if (oc.tv_sec | oc.tv_nsec)
		set_normalized_timespec(&oc, oc.tv_sec,
					oc.tv_nsec + clock->res);
	tstojiffie(&oc, clock->res, exp);

	/*
	 * Check if the requested time is more than the timer code
	 * can handle (if so we error out but return the value too).
	 */
	if (*exp > ((u64)MAX_JIFFY_OFFSET))
		/*
		 * This is a considered response, not exactly in
		 * line with the standard (in fact it is silent on
		 * possible overflows).  We assume such a large
		 * value is ALMOST always a programming error and
		 * try not to compound it by setting a really dumb
		 * value.
		 */
		rtn = -EINVAL;
	/*
	 * return the actual jiffies expire time, full 64 bits
	 */
	*exp += jiffies_64_f;
	return rtn;
}
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static inline int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct k_clock *clock = &posix_clocks[timr->it_clock];
	u64 expire_64;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* disable the timer */
	timr->it.real.incr = 0;
	/*
	 * careful here.  If smp we could be in the "fire" routine which will
	 * be spinning as we hold the lock.  But this is ONLY an SMP issue.
	 */
	if (try_to_del_timer_sync(&timr->it.real.timer) < 0) {
#ifdef CONFIG_SMP
		/*
		 * It can only be active if on another cpu.  Since
		 * we have cleared the interval stuff above, it should
		 * clear once we release the spin lock.  Of course once
		 * we do that anything could happen, including the
		 * complete melt down of the timer.  So return with
		 * a "retry" exit status.
		 */
		return TIMER_RETRY;
#endif
	}

	remove_from_abslist(timr);

	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;
	timr->it_overrun = -1;
	/*
	 * switch off the timer when it_value is zero
	 */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) {
		timr->it.real.timer.expires = 0;
		return 0;
	}

	if (adjust_abs_time(clock,
			    &new_setting->it_value, flags & TIMER_ABSTIME,
			    &expire_64, &(timr->it.real.wall_to_prev))) {
		return -EINVAL;
	}
	timr->it.real.timer.expires = (unsigned long)expire_64;
	tstojiffie(&new_setting->it_interval, clock->res, &expire_64);
	timr->it.real.incr = (unsigned long)expire_64;

	/*
	 * We do not even queue SIGEV_NONE timers!  But we do put them
	 * in the abs list so we can do that right.
	 */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE))
		add_timer(&timr->it.real.timer);

	if (flags & TIMER_ABSTIME && clock->abs_struct) {
		spin_lock(&clock->abs_struct->lock);
		list_add_tail(&(timr->it.real.abs_timer_entry),
			      &(clock->abs_struct->list));
		spin_unlock(&clock->abs_struct->lock);
	}
	return 0;
}
/* Set a POSIX.1b interval timer */
asmlinkage long
sys_timer_settime(timer_t timer_id, int flags,
		  const struct itimerspec __user *new_setting,
		  struct itimerspec __user *old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if ((!good_timespec(&new_spec.it_interval)) ||
	    (!good_timespec(&new_spec.it_value)))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	error = CLOCK_DISPATCH(timr->it_clock, timer_set,
			       (timr, flags, &new_spec, rtn));

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		rtn = NULL;	// We already got the old time...
		goto retry;
	}

	if (old_setting && !error && copy_to_user(old_setting,
						  &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
static inline int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.incr = 0;

	if (try_to_del_timer_sync(&timer->it.real.timer) < 0) {
#ifdef CONFIG_SMP
		/*
		 * It can only be active if on another cpu.  Since
		 * we have cleared the interval stuff above, it should
		 * clear once we release the spin lock.  Of course once
		 * we do that anything could happen, including the
		 * complete melt down of the timer.  So return with
		 * a "retry" exit status.
		 */
		return TIMER_RETRY;
#endif
	}

	remove_from_abslist(timer);

	return 0;
}
static inline int timer_delete_hook(struct k_itimer *timer)
{
	return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
}
/* Delete a POSIX.1b interval timer. */
asmlinkage long
sys_timer_delete(timer_t timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

#ifdef CONFIG_SMP
	int error;
retry_delete:
#endif
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

#ifdef CONFIG_SMP
	error = timer_delete_hook(timer);

	if (error == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
#else
	timer_delete_hook(timer);
#endif
	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	if (timer->it_process) {
		if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
			put_task_struct(timer->it_process);
		timer->it_process = NULL;
	}
	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}
/*
 * Delete a timer owned by the process; used by exit_itimers.
 */
static inline void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

#ifdef CONFIG_SMP
	int error;
retry_delete:
#endif
	spin_lock_irqsave(&timer->it_lock, flags);

#ifdef CONFIG_SMP
	error = timer_delete_hook(timer);

	if (error == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
#else
	timer_delete_hook(timer);
#endif
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	if (timer->it_process) {
		if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
			put_task_struct(timer->it_process);
		timer->it_process = NULL;
	}
	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}
/*
 * And now for the "clock" calls
 *
 * These functions are called both from timer functions (with the timer
 * spin_lock_irq() held) and from clock calls with no locking.  They must
 * use the save flags versions of locks.
 */

/*
 * We do ticks here to avoid the irq lock ( they take sooo long).
 * The seqlock is great here.  Since we are a reader, we don't really care
 * if we are interrupted since we don't take lock that will stall us or
 * any other cpu.  Voila, no irq lock is needed.
 */
static u64 do_posix_clock_monotonic_gettime_parts(
	struct timespec *tp, struct timespec *mo)
{
	u64 jiff;
	unsigned int seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(tp);
		*mo = wall_to_monotonic;
		jiff = jiffies_64;

	} while(read_seqretry(&xtime_lock, seq));

	return jiff;
}
static int do_posix_clock_monotonic_get(clockid_t clock, struct timespec *tp)
{
	struct timespec wall_to_mono;

	do_posix_clock_monotonic_gettime_parts(tp, &wall_to_mono);

	set_normalized_timespec(tp, tp->tv_sec + wall_to_mono.tv_sec,
				tp->tv_nsec + wall_to_mono.tv_nsec);

	return 0;
}

int do_posix_clock_monotonic_gettime(struct timespec *tp)
{
	return do_posix_clock_monotonic_get(CLOCK_MONOTONIC, tp);
}
int do_posix_clock_nosettime(clockid_t clockid, struct timespec *tp)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);

int do_posix_clock_notimer_create(struct k_itimer *timer)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_notimer_create);

int do_posix_clock_nonanosleep(clockid_t clock, int flags, struct timespec *t)
{
#ifndef ENOTSUP
	return -EOPNOTSUPP;	/* aka ENOTSUP in userland for POSIX */
#else	/* parisc does define it separately. */
	return -ENOTSUP;
#endif
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
asmlinkage long
sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp)
{
	struct timespec new_tp;

	if (invalid_clockid(which_clock))
		return -EINVAL;
	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;

	return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
}
asmlinkage long
sys_clock_gettime(clockid_t which_clock, struct timespec __user *tp)
{
	struct timespec kernel_tp;
	int error;

	if (invalid_clockid(which_clock))
		return -EINVAL;
	error = CLOCK_DISPATCH(which_clock, clock_get,
			       (which_clock, &kernel_tp));
	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}
asmlinkage long
sys_clock_getres(clockid_t which_clock, struct timespec __user *tp)
{
	struct timespec rtn_tp;
	int error;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	error = CLOCK_DISPATCH(which_clock, clock_getres,
			       (which_clock, &rtn_tp));

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
		error = -EFAULT;
	}

	return error;
}
/*
 * The standard says that an absolute nanosleep call MUST wake up at
 * the requested time in spite of clock settings.  Here is what we do:
 * For each nanosleep call that needs it (only absolute and not on
 * CLOCK_MONOTONIC* (as it cannot be set)) we thread a little structure
 * into the "nanosleep_abs_list".  All we need is the task_struct pointer.
 * Whenever the clock is set we just wake up all those tasks.  The rest
 * is done by the while loop in clock_nanosleep().
 *
 * On locking, clock_was_set() is called from update_wall_clock which
 * holds (or has held for it) a write_lock_irq(xtime_lock) and is
 * called from the timer bh code.  Thus we need the irq save locks.
 *
 * Also, on the call from update_wall_clock, that is done as part of a
 * softirq thing.  We don't want to delay the system that much (possibly
 * long list of timers to fix), so we defer that work to keventd.
 */

static DECLARE_WAIT_QUEUE_HEAD(nanosleep_abs_wqueue);
static DECLARE_WORK(clock_was_set_work, (void(*)(void*))clock_was_set, NULL);

static DECLARE_MUTEX(clock_was_set_lock);
void clock_was_set(void)
{
	struct k_itimer *timr;
	struct timespec new_wall_to;
	LIST_HEAD(cws_list);
	unsigned long seq;


	if (unlikely(in_interrupt())) {
		schedule_work(&clock_was_set_work);
		return;
	}
	wake_up_all(&nanosleep_abs_wqueue);

	/*
	 * Check if there exist TIMER_ABSTIME timers to correct.
	 *
	 * Notes on locking: This code is run in task context with irq
	 * on.  We CAN be interrupted!  All other usage of the abs list
	 * lock is under the timer lock which holds the irq lock as
	 * well.  We REALLY don't want to scan the whole list with the
	 * interrupt system off, AND we would like a sequence lock on
	 * this code as well.  Since we assume that the clock will not
	 * be set often, it seems ok to take and release the irq lock
	 * for each timer.  In fact add_timer will do this, so this is
	 * not an issue.  So we know when we are done, we will move the
	 * whole list to a new location.  Then as we process each entry,
	 * we will move it to the actual list again.  This way, when our
	 * copy is empty, we are done.  We are not all that concerned
	 * about preemption so we will use a semaphore lock to protect
	 * against reentry.  This way we will not stall another
	 * processor.  It is possible that this may delay some timers
	 * that should have expired, given the new clock, but even this
	 * will be minimal as we will always update to the current time,
	 * even if it was set by a task that is waiting for entry to
	 * this code.  Timers that expire too early will be caught by
	 * the expire code and restarted.
	 *
	 * Absolute timers that repeat are left in the abs list while
	 * waiting for the task to pick up the signal.  This means we
	 * may find timers that are not in the "add_timer" list, but are
	 * in the abs list.  We do the same thing for these, save
	 * putting them back in the "add_timer" list.  (Note, these are
	 * left in the abs list mainly to indicate that they are
	 * ABSOLUTE timers, a fact that is used by the re-arm code, and
	 * for which we have no other flag.)
	 *
	 */

	down(&clock_was_set_lock);
	spin_lock_irq(&abs_list.lock);
	list_splice_init(&abs_list.list, &cws_list);
	spin_unlock_irq(&abs_list.lock);
	do {
		do {
			seq = read_seqbegin(&xtime_lock);
			new_wall_to = wall_to_monotonic;
		} while (read_seqretry(&xtime_lock, seq));

		spin_lock_irq(&abs_list.lock);
		if (list_empty(&cws_list)) {
			spin_unlock_irq(&abs_list.lock);
			break;
		}
		timr = list_entry(cws_list.next, struct k_itimer,
				  it.real.abs_timer_entry);

		list_del_init(&timr->it.real.abs_timer_entry);
		if (add_clockset_delta(timr, &new_wall_to) &&
		    del_timer(&timr->it.real.timer))	/* timer run yet? */
			add_timer(&timr->it.real.timer);
		list_add(&timr->it.real.abs_timer_entry, &abs_list.list);
		spin_unlock_irq(&abs_list.lock);
	} while (1);

	up(&clock_was_set_lock);
}
long clock_nanosleep_restart(struct restart_block *restart_block);

asmlinkage long
sys_clock_nanosleep(clockid_t which_clock, int flags,
		    const struct timespec __user *rqtp,
		    struct timespec __user *rmtp)
{
	struct timespec t;
	struct restart_block *restart_block =
	    &(current_thread_info()->restart_block);
	int ret;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if ((unsigned) t.tv_nsec >= NSEC_PER_SEC || t.tv_sec < 0)
		return -EINVAL;

	/*
	 * Do this here as nsleep function does not have the real address.
	 */
	restart_block->arg1 = (unsigned long)rmtp;

	ret = CLOCK_DISPATCH(which_clock, nsleep, (which_clock, flags, &t));

	if ((ret == -ERESTART_RESTARTBLOCK) && rmtp &&
	    copy_to_user(rmtp, &t, sizeof (t)))
		return -EFAULT;

	return ret;
}
static int common_nsleep(clockid_t which_clock,
			 int flags, struct timespec *tsave)
{
	struct timespec t, dum;
	DECLARE_WAITQUEUE(abs_wqueue, current);
	u64 rq_time = (u64)0;
	s64 left;
	int abs;
	struct restart_block *restart_block =
	    &current_thread_info()->restart_block;

	abs_wqueue.flags = 0;
	abs = flags & TIMER_ABSTIME;

	if (restart_block->fn == clock_nanosleep_restart) {
		/*
		 * Interrupted by a non-delivered signal, pick up remaining
		 * time and continue.  Remaining time is in arg2 & 3.
		 */
		restart_block->fn = do_no_restart_syscall;

		rq_time = restart_block->arg3;
		rq_time = (rq_time << 32) + restart_block->arg2;
		if (!rq_time)
			return -EINTR;
		left = rq_time - get_jiffies_64();
		if (left <= (s64)0)
			return 0;	/* Already passed */
	}

	if (abs && (posix_clocks[which_clock].clock_get !=
		    posix_clocks[CLOCK_MONOTONIC].clock_get))
		add_wait_queue(&nanosleep_abs_wqueue, &abs_wqueue);

	do {
		t = *tsave;
		if (abs || !rq_time) {
			adjust_abs_time(&posix_clocks[which_clock], &t, abs,
					&rq_time, &dum);
		}

		left = rq_time - get_jiffies_64();
		if (left >= (s64)MAX_JIFFY_OFFSET)
			left = (s64)MAX_JIFFY_OFFSET;
		if (left < (s64)0)
			break;

		schedule_timeout_interruptible(left);

		left = rq_time - get_jiffies_64();
	} while (left > (s64)0 && !test_thread_flag(TIF_SIGPENDING));

	if (abs_wqueue.task_list.next)
		finish_wait(&nanosleep_abs_wqueue, &abs_wqueue);

	if (left > (s64)0) {

		/*
		 * Always restart abs calls from scratch to pick up any
		 * clock shifting that happened while we are away.
		 */
		if (abs)
			return -ERESTARTNOHAND;

		left *= TICK_NSEC;
		tsave->tv_sec = div_long_long_rem(left,
						  NSEC_PER_SEC,
						  &tsave->tv_nsec);
		/*
		 * Restart works by saving the time remaining in
		 * arg2 & 3 (it is 64 bits of jiffies).  The other
		 * info we need is the clock_id (saved in arg0).
		 * The sys_call interface needs the users
		 * timespec return address which _it_ saves in arg1.
		 *
		 * Since we have cast the nanosleep call to a clock_nanosleep
		 * both can be restarted with the same code.
		 */
		restart_block->fn = clock_nanosleep_restart;
		restart_block->arg0 = which_clock;
		/*
		 * Caller sets arg1
		 */
		restart_block->arg2 = rq_time & 0xffffffffLL;
		restart_block->arg3 = rq_time >> 32;

		return -ERESTART_RESTARTBLOCK;
	}

	return 0;
}
/*
 * This will restart clock_nanosleep.
 */
long
clock_nanosleep_restart(struct restart_block *restart_block)
{
	struct timespec t;
	int ret = common_nsleep(restart_block->arg0, 0, &t);

	if ((ret == -ERESTART_RESTARTBLOCK) && restart_block->arg1 &&
	    copy_to_user((struct timespec __user *)(restart_block->arg1), &t,
			 sizeof (t)))
		return -EFAULT;

	return ret;
}