/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#ifdef CONFIG_SMP

/*
 * Allocate the thread_group_cputime structure appropriately for SMP kernels
 * and fill in the current values of the fields.  Called from copy_signal()
 * via thread_group_cputime_clone_thread() when adding a second or subsequent
 * thread to a thread group.  Assumes interrupts are enabled when called.
 */
int thread_group_cputime_alloc_smp(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct task_cputime *cputime;

        /*
         * If we have multiple threads and we don't already have a
         * per-CPU task_cputime struct, allocate one and fill it in with
         * the times accumulated so far.
         */
        if (sig->cputime.totals)
                return 0;
        cputime = alloc_percpu(struct task_cputime);
        if (cputime == NULL)
                return -ENOMEM;
        read_lock(&tasklist_lock);
        spin_lock_irq(&tsk->sighand->siglock);
        if (sig->cputime.totals) {
                spin_unlock_irq(&tsk->sighand->siglock);
                read_unlock(&tasklist_lock);
                free_percpu(cputime);
                return 0;
        }
        sig->cputime.totals = cputime;
        cputime = per_cpu_ptr(sig->cputime.totals, get_cpu());
        cputime->utime = tsk->utime;
        cputime->stime = tsk->stime;
        cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
        put_cpu_no_resched();
        spin_unlock_irq(&tsk->sighand->siglock);
        read_unlock(&tasklist_lock);
        return 0;
}
/**
 * thread_group_cputime_smp - Sum the thread group time fields across all CPUs.
 *
 * @tsk:	The task we use to identify the thread group.
 * @times:	task_cputime structure in which we return the summed fields.
 *
 * Walk the list of CPUs to sum the per-CPU time fields in the thread group
 * time structure.
 */
void thread_group_cputime_smp(
        struct task_struct *tsk,
        struct task_cputime *times)
{
        struct signal_struct *sig;
        int i;
        struct task_cputime *tot;

        sig = tsk->signal;
        if (unlikely(!sig) || !sig->cputime.totals) {
                times->utime = tsk->utime;
                times->stime = tsk->stime;
                times->sum_exec_runtime = tsk->se.sum_exec_runtime;
                return;
        }
        times->stime = times->utime = cputime_zero;
        times->sum_exec_runtime = 0;
        for_each_possible_cpu(i) {
                tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
                times->utime = cputime_add(times->utime, tot->utime);
                times->stime = cputime_add(times->stime, tot->stime);
                times->sum_exec_runtime += tot->sum_exec_runtime;
        }
}

#endif /* CONFIG_SMP */
/*
 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
        cputime_t cputime;

        cputime = secs_to_cputime(rlim_new);
        if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
            cputime_lt(current->signal->it_prof_expires, cputime)) {
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
        }
}
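/*
 * Illustrative userspace trigger (not kernel code): this helper is meant to
 * run when a process tightens its CPU limit, e.g.
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = RLIM_INFINITY };
 *	setrlimit(RLIMIT_CPU, &rl);
 *
 * After a 10-second soft limit is installed this way, the limit is armed as
 * a CPUCLOCK_PROF expiration even if the process has already been running.
 * This assumes the setrlimit() path is the caller; see the callers of
 * update_rlimit_cpu().
 */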
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                    same_thread_group(p, current) : thread_group_leader(p))) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}
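/*
 * For reference (from the CPUCLOCK_* macros in <linux/posix-timers.h>): a CPU
 * clockid_t packs the target PID and the clock type into one word, roughly
 *
 *	clockid = (~pid << 3) | (per-thread ? 4 : 0) | which
 *
 * where "which" is CPUCLOCK_PROF (0), CPUCLOCK_VIRT (1) or CPUCLOCK_SCHED (2).
 * check_clock() above just unpacks those fields and verifies that the named
 * task exists and is a thread of the caller (per-thread clocks) or a thread
 * group leader (process clocks).
 */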
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;		/* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC +
                            tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(cpu.sched);
        else
                cputime_to_timespec(cpu.cpu, tp);
}
static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}
static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}
/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = cputime_div(time, div);

        return max_t(cputime_t, res, 1);
}
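/*
 * Example of the starvation case the comment above describes: dividing a
 * 5-tick expiration by 10 (for instance, if a process expiration were split
 * across 10 threads) gives cputime_div(5, 10) == 0, an already-expired time
 * that would fire again on every tick.  Clamping the quotient to 1 keeps a
 * rearmed timer at least one tick in the future.
 */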
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}
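/*
 * Worked example (CPUCLOCK_SCHED case, illustrative numbers): with
 * expires = 100, incr = 10 and now = 163, delta = 163 + 10 - 100 = 73.
 * The first loop doubles incr up to 40 (i = 2); the second loop then adds
 * 40 + 20 + 10 to expires and 4 + 2 + 1 = 7 to it_overrun, leaving
 * expires = 170, the first period boundary after "now".  The seven overruns
 * are exactly the firings that would have happened at 100, 110, ..., 160.
 */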
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = task_sched_runtime(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
                                         struct task_struct *p,
                                         union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        switch (clock_idx) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = thread_group_sched_runtime(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
                                            cpu);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        return ret;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (thread_group_leader(p) && p->signal) {
                                        error =
                                            cpu_clock_sample_group(which_clock,
                                                                   p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}
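/*
 * Userspace reaches this through clock_gettime(2).  Illustrative only:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *
 * takes the pid == 0 fast path above (sampling ourselves), while a clock id
 * built with clock_getcpuclockid(pid, &clockid) for another process goes
 * through the find_task_by_vpid() branch.
 */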
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !thread_group_leader(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sum_exec_runtime;
                }
        }
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        struct task_cputime cputime;

        thread_group_cputime(tsk, &cputime);
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched)
                                break;
                        listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
                        listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */

                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (cputime_eq(p->cputime_expires.prof_exp,
                                               cputime_zero) ||
                                    cputime_gt(p->cputime_expires.prof_exp,
                                               nt->expires.cpu))
                                        p->cputime_expires.prof_exp =
                                                nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (cputime_eq(p->cputime_expires.virt_exp,
                                               cputime_zero) ||
                                    cputime_gt(p->cputime_expires.virt_exp,
                                               nt->expires.cpu))
                                        p->cputime_expires.virt_exp =
                                                nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->cputime_expires.sched_exp == 0 ||
                                    p->cputime_expires.sched_exp >
                                                        nt->expires.sched)
                                        p->cputime_expires.sched_exp =
                                                nt->expires.sched;
                                break;
                        }
                } else {
                        /*
                         * For a process timer, set the cached expiration time.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (!cputime_eq(p->signal->it_virt_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                p->signal->cputime_expires.virt_exp =
                                        timer->it.cpu.expires.cpu;
                                break;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_prof_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
                                p->signal->cputime_expires.prof_exp =
                                        timer->it.cpu.expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                p->signal->cputime_expires.sched_exp =
                                        timer->it.cpu.expires.sched;
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_clock_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}
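/*
 * Illustrative userspace counterpart (not kernel code): a periodic
 * process-CPU-time timer that delivers SIGALRM for every second of CPU
 * time consumed:
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGALRM };
 *	struct itimerspec its = { .it_value    = { 1, 0 },
 *				  .it_interval = { 1, 0 } };
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 *
 * The timer_settime() call ends up here, with "new" carrying both the
 * initial expiry and the reload interval installed above.
 */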
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
 dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_clock_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;

        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.virt_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case thread timers.
         */
        if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
                unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
                unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (sig->rlim[RLIMIT_RTTIME].rlim_cur
                            < sig->rlim[RLIMIT_RTTIME].rlim_max) {
                                sig->rlim[RLIMIT_RTTIME].rlim_cur +=
                                                                USEC_PER_SEC;
                        }
                        printk(KERN_INFO
                               "RT Watchdog Timeout: %s[%d]\n",
                               tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
}
/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;

        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
            cputime_eq(sig->it_prof_expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED]))
                return;

        /*
         * Collect the current process totals.
         */
        thread_group_cputime(tsk, &cputime);
        utime = cputime.utime;
        ptime = cputime_add(utime, cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;
        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
                        prof_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
                        virt_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
                        sched_expires = tl->expires.sched;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                if (cputime_ge(ptime, sig->it_prof_expires)) {
                        /* ITIMER_PROF fires and reloads.  */
                        sig->it_prof_expires = sig->it_prof_incr;
                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                                sig->it_prof_expires = cputime_add(
                                        sig->it_prof_expires, ptime);
                        }
                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
                    (cputime_eq(prof_expires, cputime_zero) ||
                     cputime_lt(sig->it_prof_expires, prof_expires))) {
                        prof_expires = sig->it_prof_expires;
                }
        }
        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                if (cputime_ge(utime, sig->it_virt_expires)) {
                        /* ITIMER_VIRTUAL fires and reloads.  */
                        sig->it_virt_expires = sig->it_virt_incr;
                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                                sig->it_virt_expires = cputime_add(
                                        sig->it_virt_expires, utime);
                        }
                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
                    (cputime_eq(virt_expires, cputime_zero) ||
                     cputime_lt(sig->it_virt_expires, virt_expires))) {
                        virt_expires = sig->it_virt_expires;
                }
        }
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (sig->rlim[RLIMIT_CPU].rlim_cur
                            < sig->rlim[RLIMIT_CPU].rlim_max) {
                                sig->rlim[RLIMIT_CPU].rlim_cur++;
                        }
                }
                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        if (!cputime_eq(prof_expires, cputime_zero) &&
            (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
             cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
                sig->cputime_expires.prof_exp = prof_expires;
        if (!cputime_eq(virt_expires, cputime_zero) &&
            (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
             cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
                sig->cputime_expires.virt_exp = virt_expires;
        if (sched_expires != 0 &&
            (sig->cputime_expires.sched_exp == 0 ||
             sig->cputime_expires.sched_exp > sched_expires))
                sig->cputime_expires.sched_exp = sched_expires;
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, now);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (cputime_eq(cputime->utime, cputime_zero) &&
            cputime_eq(cputime->stime, cputime_zero) &&
            cputime->sum_exec_runtime == 0)
                return 1;
        return 0;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (!cputime_eq(expires->utime, cputime_zero) &&
            cputime_ge(sample->utime, expires->utime))
                return 1;
        if (!cputime_eq(expires->stime, cputime_zero) &&
            cputime_ge(cputime_add(sample->utime, sample->stime),
                       expires->stime))
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}
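/*
 * Note the asymmetry above: an expires->utime limit is compared against
 * utime alone (the CPUCLOCK_VIRT view), while an expires->stime limit is
 * compared against utime + stime.  The stime slot carries the profiling
 * (CPUCLOCK_PROF) expiration, not a system-time-only limit.
 */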
/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 * @sig:	The signal pointer for that task.
 *
 * If there are no timers set return false.  Otherwise snapshot the task and
 * thread group timers, then compare them with the corresponding expiration
 * times.  Returns true if a timer has expired, else returns false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk,
                                       struct signal_struct *sig)
{
        struct task_cputime task_sample = {
                .utime = tsk->utime,
                .stime = tsk->stime,
                .sum_exec_runtime = tsk->se.sum_exec_runtime
        };
        struct task_cputime group_sample;

        if (task_cputime_zero(&tsk->cputime_expires) &&
            task_cputime_zero(&sig->cputime_expires))
                return 0;
        if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                return 1;
        thread_group_cputime(tsk, &group_sample);
        return task_cputime_expired(&group_sample, &sig->cputime_expires);
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;
        struct signal_struct *sig;
        struct sighand_struct *sighand;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        /* Pick up tsk->signal and make sure it's valid. */
        sig = tsk->signal;
        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.  Also check that
         * tsk->signal is non-NULL; this probably can't happen but cover the
         * possibility anyway.
         */
        if (unlikely(!sig) || !fastpath_timer_check(tsk, sig))
                return;

        sighand = lock_task_sighand(tsk, &flags);
        if (likely(sighand)) {
                /*
                 * Here we take off tsk->signal->cpu_timers[N] and
                 * tsk->cpu_timers[N] all the timers that are firing, and
                 * put them on the firing list.
                 */
                check_thread_timers(tsk, &firing);
                check_process_timers(tsk, &firing);

                /*
                 * We must release these locks before taking any timer's lock.
                 * There is a potential race with timer deletion here, as the
                 * siglock now protects our private firing list.  We have set
                 * the firing flag in each timer, so that a deletion attempt
                 * that gets the timer lock before we do will give it up and
                 * spin until we've taken care of that timer below.
                 */
        }
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int firing;
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}
/*
 * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
 * is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_clock_sample_group_locked(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                switch (clock_idx) {
                case CPUCLOCK_PROF:
                        tsk->signal->cputime_expires.prof_exp = *newval;
                        break;
                case CPUCLOCK_VIRT:
                        tsk->signal->cputime_expires.virt_exp = *newval;
                        break;
                }
        }
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
            &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;
        }
        return error;
}
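/*
 * Illustrative only: userspace arrives here via clock_nanosleep(2), e.g.
 * waiting until the process has consumed two seconds of CPU time:
 *
 *	struct timespec ts = { 2, 0 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &ts, NULL);
 *
 * The -EINVAL check above rejects sleeping on your own thread CPU clock:
 * that clock cannot advance while the thread sleeps, so the sleep would
 * never end.
 */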
long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;
        struct itimerspec it;
        int error;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;

        restart_block->fn = do_no_restart_syscall;
        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = t.tv_sec;
                restart_block->arg3 = t.tv_nsec;
        }
        return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp, struct timespec __user *rmtp)
{
        return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
                .nsleep_restart = thread_cpu_nsleep_restart,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);