/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
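/*
 * Note on clock IDs: the CPUCLOCK_* macros used throughout this file
 * come from <linux/posix-timers.h>.  Roughly (a sketch; see the header
 * for the authoritative definitions): CPUCLOCK_WHICH() extracts the
 * clock type (PROF, VIRT or SCHED) from the low bits of a clockid_t,
 * CPUCLOCK_PERTHREAD() tests the flag bit that distinguishes a thread
 * clock from a process clock, and CPUCLOCK_PID() recovers the target
 * PID from the remaining bits (0 meaning the caller itself).
 */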
/*
 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
	    cputime_lt(current->signal->it_prof_expires, cputime)) {
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
	}
}
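/*
 * For illustration (userspace sketch, not kernel code): this path runs
 * when a process lowers its RLIMIT_CPU soft limit, e.g.
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *	setrlimit(RLIMIT_CPU, &rl);
 *
 * after which SIGXCPU is due once the process has consumed ten seconds
 * of CPU time.
 */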
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		    same_thread_group(p, current) : thread_group_leader(p))) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC +
			    tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}
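/*
 * Example: for CPUCLOCK_SCHED a timespec of { .tv_sec = 1,
 * .tv_nsec = 500000000 } becomes ret.sched == 1500000000ULL
 * (nanoseconds); for the tick-based clocks the same value goes through
 * timespec_to_cputime() instead.  Zeroing ret.sched first lets callers
 * test the union against zero no matter which member is in use.
 */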
static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(cpu.sched);
	else
		cputime_to_timespec(cpu.cpu, tp);
}
static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return cputime_lt(now.cpu, then.cpu);
	}
}
static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}
/*
 * Divide and limit the result to res >= 1.
 *
 * This is necessary to prevent signal delivery starvation, when the
 * result of the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
	cputime_t res = cputime_div(time, div);

	return max_t(cputime_t, res, 1);
}
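/*
 * Example of the clamp above: splitting a single jiffy across four
 * threads would give cputime_div(1, 4) == 0; returning 1 instead keeps
 * every share nonzero, so expiry checks based on the quotient still
 * make forward progress.
 */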
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = cputime_sub(cputime_add(now.cpu, incr),
				    timer->it.cpu.expires.cpu);
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
			incr = cputime_add(incr, incr);
		for (; i >= 0; incr = cputime_halve(incr), i--) {
			if (cputime_lt(delta, incr))
				continue;
			timer->it.cpu.expires.cpu =
				cputime_add(timer->it.cpu.expires.cpu, incr);
			timer->it_overrun += 1 << i;
			delta = cputime_sub(delta, incr);
		}
	}
}
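/*
 * Worked example of the loops above (hypothetical CPUCLOCK_SCHED
 * values): expires = 100, incr = 3, now = 110.  Then delta = 110 + 3 -
 * 100 = 13.  The doubling loop runs twice (incr: 3 -> 6 -> 12, i = 2).
 * The halving pass adds 12 at i = 2 (expires = 112, it_overrun += 4,
 * delta = 1) and skips 6 and 3.  Net effect: the four elapsed periods
 * at 100, 103, 106 and 109 are counted as overruns, and the next
 * expiry lands at 112, the first multiple of the increment after now.
 */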
static inline cputime_t prof_ticks(struct task_struct *p)
{
	return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't export its true resolution, but it is
			 * much better than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct sighand_struct *sighand;
	struct signal_struct *sig;
	struct task_struct *t;

	*times = INIT_CPUTIME;

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	if (!sighand)
		goto out;

	sig = tsk->signal;

	t = tsk;
	do {
		times->utime = cputime_add(times->utime, t->utime);
		times->stime = cputime_add(times->stime, t->stime);
		times->sum_exec_runtime += t->se.sum_exec_runtime;

		t = next_thread(t);
	} while (t != tsk);

	times->utime = cputime_add(times->utime, sig->utime);
	times->stime = cputime_add(times->stime, sig->stime);
	times->sum_exec_runtime += sig->sum_sched_runtime;
out:
	rcu_read_unlock();
}
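/*
 * Note: the do/while loop above accounts the threads that are still
 * alive; the sig->utime/stime/sum_sched_runtime terms fold in the CPU
 * time of threads that already exited and were reaped, which was
 * accumulated into the signal_struct at that point.
 */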
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->signal) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
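/*
 * For illustration (userspace sketch, not kernel code): reading
 * another process's CPU clock takes the pid != 0 branch above, e.g.
 *
 *	clockid_t clk;
 *	struct timespec ts;
 *	clock_getcpuclockid(pid, &clk);
 *	clock_gettime(clk, &ts);
 */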
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.incr.sched = 0;
	new_timer->it.cpu.expires.sched = 0;

	read_lock(&tasklist_lock);
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !thread_group_leader(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
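/*
 * For illustration (userspace sketch, not kernel code): a timer on the
 * caller's own process CPU clock arrives here via sys_timer_create:
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGPROF,
 *	};
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 */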
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = cputime_add(utime, stime);

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, ptime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 ptime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, utime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sum_exec_runtime) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sum_exec_runtime;
		}
	}
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct task_cputime cputime;

	thread_group_cputimer(tsk, &cputime);
	cleanup_timers(tsk->signal->cpu_timers,
		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires, now);
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;
	unsigned long i;

	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
		p->cpu_timers : p->signal->cpu_timers);
	head += CPUCLOCK_WHICH(timer->it_clock);

	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);

	listpos = head;
	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		list_for_each_entry(next, head, entry) {
			if (next->expires.sched > nt->expires.sched)
				break;
			listpos = &next->entry;
		}
	} else {
		list_for_each_entry(next, head, entry) {
			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
				break;
			listpos = &next->entry;
		}
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		/*
		 * We are the new earliest-expiring timer.
		 * If we are a thread timer, there can always
		 * be a process timer telling us to stop earlier.
		 */

		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_PROF:
				if (cputime_eq(p->cputime_expires.prof_exp,
					       cputime_zero) ||
				    cputime_gt(p->cputime_expires.prof_exp,
					       nt->expires.cpu))
					p->cputime_expires.prof_exp =
						nt->expires.cpu;
				break;
			case CPUCLOCK_VIRT:
				if (cputime_eq(p->cputime_expires.virt_exp,
					       cputime_zero) ||
				    cputime_gt(p->cputime_expires.virt_exp,
					       nt->expires.cpu))
					p->cputime_expires.virt_exp =
						nt->expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				if (p->cputime_expires.sched_exp == 0 ||
				    p->cputime_expires.sched_exp >
							nt->expires.sched)
					p->cputime_expires.sched_exp =
						nt->expires.sched;
				break;
			}
		} else {
			/*
			 * For a process timer, set the cached expiration time.
			 */
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_VIRT:
				if (!cputime_eq(p->signal->it_virt_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_virt_expires,
					       timer->it.cpu.expires.cpu))
					break;
				p->signal->cputime_expires.virt_exp =
					timer->it.cpu.expires.cpu;
				break;
			case CPUCLOCK_PROF:
				if (!cputime_eq(p->signal->it_prof_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_prof_expires,
					       timer->it.cpu.expires.cpu))
					break;
				i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
				if (i != RLIM_INFINITY &&
				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
					break;
				p->signal->cputime_expires.prof_exp =
					timer->it.cpu.expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				p->signal->cputime_expires.sched_exp =
					timer->it.cpu.expires.sched;
				break;
			}
		}
	}

	spin_unlock(&p->sighand->siglock);
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->signal.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->signal == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);
	spin_unlock(&p->sighand->siglock);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_clock_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer, val);
	}

	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.incr, &old->it_interval);
	}
	return ret;
}
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_clock_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		if (timer->it.cpu.incr.sched == 0 &&
		    cpu_time_before(timer->it_clock,
				    timer->it.cpu.expires, now)) {
			/*
			 * Do-nothing timer expired and has no reload,
			 * so it's as if it was never set.
			 */
			timer->it.cpu.expires.sched = 0;
			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
			return;
		}
		/*
		 * Account for any expirations and reloads that should
		 * have happened.
		 */
		bump_cpu_timer(timer, now);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
			tsk->cputime_expires.prof_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
			tsk->cputime_expires.virt_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
			tsk->cputime_expires.sched_exp = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
								USEC_PER_SEC;
			}
			printk(KERN_INFO
				"RT Watchdog Timeout: %s[%d]\n",
				tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}
static void stop_process_timers(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	unsigned long flags;

	if (!cputimer->running)
		return;

	spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers lists onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;

	/*
	 * Don't sample the current process CPU clocks if there are no timers.
	 */
	if (list_empty(&timers[CPUCLOCK_PROF]) &&
	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
	    list_empty(&timers[CPUCLOCK_VIRT]) &&
	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
	    list_empty(&timers[CPUCLOCK_SCHED])) {
		stop_process_timers(tsk);
		return;
	}

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = cputime_add(utime, cputime.stime);
	sum_sched_runtime = cputime.sum_exec_runtime;
	maxfire = 20;
	prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
			prof_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
			virt_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
			sched_expires = tl->expires.sched;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
		if (cputime_ge(ptime, sig->it_prof_expires)) {
			/* ITIMER_PROF fires and reloads.  */
			sig->it_prof_expires = sig->it_prof_incr;
			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
				sig->it_prof_expires = cputime_add(
					sig->it_prof_expires, ptime);
			}
			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
		    (cputime_eq(prof_expires, cputime_zero) ||
		     cputime_lt(sig->it_prof_expires, prof_expires))) {
			prof_expires = sig->it_prof_expires;
		}
	}
	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
		if (cputime_ge(utime, sig->it_virt_expires)) {
			/* ITIMER_VIRTUAL fires and reloads.  */
			sig->it_virt_expires = sig->it_virt_incr;
			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
				sig->it_virt_expires = cputime_add(
					sig->it_virt_expires, utime);
			}
			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
		    (cputime_eq(virt_expires, cputime_zero) ||
		     cputime_lt(sig->it_virt_expires, virt_expires))) {
			virt_expires = sig->it_virt_expires;
		}
	}
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		cputime_t x;
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (sig->rlim[RLIMIT_CPU].rlim_cur
			    < sig->rlim[RLIMIT_CPU].rlim_max) {
				sig->rlim[RLIMIT_CPU].rlim_cur++;
			}
		}
		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) &&
	    (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
	     cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
		sig->cputime_expires.prof_exp = prof_expires;
	if (!cputime_eq(virt_expires, cputime_zero) &&
	    (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
	     cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
		sig->cputime_expires.virt_exp = virt_expires;
	if (sched_expires != 0 &&
	    (sig->cputime_expires.sched_exp == 0 ||
	     sig->cputime_expires.sched_exp > sched_expires))
		sig->cputime_expires.sched_exp = sched_expires;
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		cpu_clock_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, now);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (cputime_eq(cputime->utime, cputime_zero) &&
	    cputime_eq(cputime->stime, cputime_zero) &&
	    cputime->sum_exec_runtime == 0)
		return 1;
	return 0;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of @sample has reached the corresponding field of
 * @expires, considering only those fields of @expires that are set.
 * Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (!cputime_eq(expires->utime, cputime_zero) &&
	    cputime_ge(sample->utime, expires->utime))
		return 1;
	if (!cputime_eq(expires->stime, cputime_zero) &&
	    cputime_ge(cputime_add(sample->utime, sample->stime),
		       expires->stime))
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}
/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	/* tsk == current, ensure it is safe to use ->signal/sighand */
	if (unlikely(tsk->exit_state))
		return 0;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = tsk->utime,
			.stime = tsk->stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (!task_cputime_zero(&sig->cputime_expires)) {
		struct task_cputime group_sample;

		thread_group_cputimer(tsk, &group_sample);
		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;

	BUG_ON(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	spin_lock(&tsk->sighand->siglock);
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	spin_unlock(&tsk->sighand->siglock);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int firing;
		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(firing >= 0)) {
			cpu_timer_fire(timer);
		}
		spin_unlock(&timer->it_lock);
	}
}
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
/*
 * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
 * is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;
	struct list_head *head;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		if (!cputime_eq(*oldval, cputime_zero)) {
			if (cputime_le(*oldval, now.cpu)) {
				/* Just about to fire. */
				*oldval = jiffies_to_cputime(1);
			} else {
				*oldval = cputime_sub(*oldval, now.cpu);
			}
		}

		if (cputime_eq(*newval, cputime_zero))
			return;
		*newval = cputime_add(*newval, now.cpu);

		/*
		 * If the RLIMIT_CPU timer will expire before the
		 * ITIMER_PROF timer, we have nothing else to do.
		 */
		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
		    < cputime_to_secs(*newval))
			return;
	}

	/*
	 * Check whether there are any process timers already set to fire
	 * before this one.  If so, we don't have anything more to do.
	 */
	head = &tsk->signal->cpu_timers[clock_idx];
	if (list_empty(head) ||
	    cputime_ge(list_first_entry(head,
				struct cpu_timer_list, entry)->expires.cpu,
		       *newval)) {
		switch (clock_idx) {
		case CPUCLOCK_PROF:
			tsk->signal->cputime_expires.prof_exp = *newval;
			break;
		case CPUCLOCK_VIRT:
			tsk->signal->cputime_expires.virt_exp = *newval;
			break;
		}
	}
}
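/*
 * For illustration (userspace sketch, not kernel code): setitimer()
 * reaches this function with clock_idx == CPUCLOCK_PROF:
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 1 },
 *		.it_interval = { .tv_sec = 1 },
 *	};
 *	setitimer(ITIMER_PROF, &itv, NULL);
 */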
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, it);
		spin_unlock_irq(&timer.it_lock);

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
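/*
 * For illustration (userspace sketch, not kernel code): sleeping for
 * 100ms of this process's CPU time uses the temporary-timer loop
 * above:
 *
 *	struct timespec ts = { 0, 100000000 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 */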
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
		     struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = rqtp->tv_sec;
		restart_block->arg3 = rqtp->tv_nsec;
	}
	return error;
}
long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;
	struct timespec __user *rmtp;
	struct timespec t;
	struct itimerspec it;
	int error;

	rmtp = (struct timespec __user *) restart_block->arg1;
	t.tv_sec = restart_block->arg2;
	t.tv_nsec = restart_block->arg3;

	restart_block->fn = do_no_restart_syscall;
	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = t.tv_sec;
		restart_block->arg3 = t.tv_nsec;
	}
	return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
			     struct timespec *rqtp,
			     struct timespec __user *rmtp)
{
	return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres = process_cpu_clock_getres,
		.clock_get = process_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = process_cpu_timer_create,
		.nsleep = process_cpu_nsleep,
		.nsleep_restart = process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres = thread_cpu_clock_getres,
		.clock_get = thread_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = thread_cpu_timer_create,
		.nsleep = thread_cpu_nsleep,
		.nsleep_restart = thread_cpu_nsleep_restart,
	};

	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	return 0;
}
__initcall(init_posix_cpu_timers);
);