/*
 * Implement CPU time clocks for the POSIX clock interface.
 */
#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
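/*
 * Illustrative call path (a sketch, not part of this file): when userspace
 * shrinks the limit via setrlimit()/prlimit(), the rlimit code is assumed
 * to call in here so the new cap takes effect immediately, roughly:
 *
 *	struct rlimit new_rlim = { .rlim_cur = 10, .rlim_max = 20 };
 *	setrlimit(RLIMIT_CPU, &new_rlim);	// userspace
 *		-> update_rlimit_cpu(tsk, new_rlim.rlim_cur);	// kernel/sys.c
 */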
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}
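/*
 * For reference (see <linux/posix-timers.h>): CPU-clock clockids pack the
 * target PID and the clock type into one clockid_t, roughly:
 *
 *	which_clock = (~pid << 3) | (per_thread ? 4 : 0) | type
 *
 * where type is CPUCLOCK_PROF (0), CPUCLOCK_VIRT (1) or CPUCLOCK_SCHED (2),
 * and CPUCLOCK_PID/CPUCLOCK_PERTHREAD/CPUCLOCK_WHICH undo the packing.
 * A pid of 0 always means "the caller", which is why check_clock() and
 * posix_cpu_clock_get() treat it specially.
 */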
static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	unsigned long long ret;

	ret = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret = cputime_to_expires(timespec_to_cputime(tp));
	}
	return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
			       unsigned long long expires,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(expires);
	else
		cputime_to_timespec((__force cputime_t)expires, tp);
}
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   unsigned long long now)
{
	int i;
	unsigned long long delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1 << i;
		delta -= incr;
	}
}
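/*
 * Worked example of the overrun arithmetic above (illustration only):
 * with expires = 100, incr = 10 and now = 137, delta = 137 + 10 - 100 = 47.
 * The first loop doubles incr while it still fits below delta
 * (10 -> 20 -> 40, ending with i = 2); the second loop subtracts the
 * power-of-two multiples that fit (40 = 4 periods), leaving
 * expires = 140 > now and it_overrun += 4.  The four expirations at
 * 100, 110, 120 and 130 are thus accounted in O(log(delta/incr)) steps
 * instead of one loop iteration per elapsed period.
 */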
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}
static inline unsigned long long prof_ticks(struct task_struct *p)
{
	cputime_t utime, stime;

	task_cputime(p, &utime, &stime);

	return cputime_to_expires(utime + stime);
}
static inline unsigned long long virt_ticks(struct task_struct *p)
{
	cputime_t utime;

	task_cputime(p, &utime, NULL);

	return cputime_to_expires(utime);
}
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    unsigned long long *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (b->utime > a->utime)
		a->utime = b->utime;

	if (b->stime > a->stime)
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	if (!cputimer->running) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		raw_spin_lock_irqsave(&cputimer->lock, flags);
		cputimer->running = 1;
		update_gt_cputime(&cputimer->cputime, &sum);
	} else
		raw_spin_lock_irqsave(&cputimer->lock, flags);
	*times = cputimer->cputime;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
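/*
 * Design note: summing utime/stime/sum_exec_runtime over every thread
 * (thread_group_cputime()) is O(nr_threads), too costly to redo on each
 * tick.  While ->running is set, the scheduler tick accounts directly
 * into the cached ->cputime instead, and update_gt_cputime() above only
 * lets a fresh full sum ratchet the cache forward, so the group clock
 * stays monotonic across stop/start cycles of the cputimer.
 */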
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  unsigned long long *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime_to_expires(cputime.utime + cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime_to_expires(cputime.utime);
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	unsigned long long rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->sighand) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}
static void cleanup_timers_list(struct list_head *head,
				unsigned long long curr)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires < curr) {
			timer->expires = 0;
		} else {
			timer->expires -= curr;
		}
	}
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	cputime_t ptime = utime + stime;

	cleanup_timers_list(head, cputime_to_expires(ptime));
	cleanup_timers_list(++head, cputime_to_expires(utime));
	cleanup_timers_list(++head, sum_exec_runtime);
}
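/*
 * Note on the pointer arithmetic above: "head" points into a 3-element
 * array of list_heads indexed by clock type, so head, ++head, ++head
 * visit the CPUCLOCK_PROF, CPUCLOCK_VIRT and CPUCLOCK_SCHED lists in
 * turn, each compared against the matching accumulated time.
 */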
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cputime_t utime, stime;

	add_device_randomness((const void *) &tsk->se.sum_exec_runtime,
						sizeof(unsigned long long));
	task_cputime(tsk, &utime, &stime);
	cleanup_timers(tsk->cpu_timers,
		       utime, stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, stime;

	task_cputime(tsk, &utime, &stime);
	cleanup_timers(tsk->signal->cpu_timers,
		       utime + sig->utime, stime + sig->stime,
		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}
static void clear_dead_task(struct k_itimer *timer, unsigned long long now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires -= now;
}
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	return expires == 0 || expires > new_exp;
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		unsigned long long exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
				cputime_expires->prof_exp = expires_to_cputime(exp);
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
				cputime_expires->virt_exp = expires_to_cputime(exp);
			break;
		case CPUCLOCK_SCHED:
			if (cputime_expires->sched_exp == 0 ||
			    cputime_expires->sched_exp > exp)
				cputime_expires->sched_exp = exp;
			break;
		}
	}
}
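/*
 * The cputime_expires cache updated above holds the earliest expiry per
 * clock type, so the per-tick fast path (fastpath_timer_check() below)
 * can decide "nothing is due" by comparing at most three cached values
 * instead of walking the sorted timer lists.
 */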
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  unsigned long long *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime_to_expires(cputime.utime + cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime_to_expires(cputime.utime);
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
#ifdef CONFIG_NO_HZ_FULL
static void nohz_kick_work_fn(struct work_struct *work)
{
	tick_nohz_full_kick_all();
}

static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);

/*
 * We need the IPIs to be sent from sane process context.
 * The posix cpu timers are always set with irqs disabled.
 */
static void posix_cpu_timer_kick_nohz(void)
{
	schedule_work(&nohz_kick_work);
}

bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
{
	if (!task_cputime_zero(&tsk->cputime_expires))
		return false;

	if (tsk->signal->cputimer.running)
		return false;

	return true;
}
#else
static inline void posix_cpu_timer_kick_nohz(void) { }
#endif
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	unsigned long long old_expires, new_expires, old_incr, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->sighand.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->sighand == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		spin_unlock(&p->sighand->siglock);
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	spin_unlock(&p->sighand->siglock);
	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   old_incr, &old->it_interval);
	}
	if (!ret)
		posix_cpu_timer_kick_nohz();
	return ret;
}
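/*
 * For orientation, the userspace-visible path into the code above looks
 * roughly like this (illustrative only, error handling omitted):
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGALRM };
 *	struct itimerspec its = { .it_value    = { .tv_sec = 2 },
 *				  .it_interval = { .tv_sec = 1 } };
 *
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 *
 * which reaches posix_cpu_timer_create() and posix_cpu_timer_set() via
 * the k_clock operations registered at the bottom of this file.
 */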
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	unsigned long long now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (now < timer->it.cpu.expires) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.expires - now,
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	unsigned long soft;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || prof_ticks(tsk) < t->expires) {
			tsk->cputime_expires.prof_exp = expires_to_cputime(t->expires);
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || virt_ticks(tsk) < t->expires) {
			tsk->cputime_expires.virt_exp = expires_to_cputime(t->expires);
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires) {
			tsk->cputime_expires.sched_exp = t->expires;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
				"RT Watchdog Timeout: %s[%d]\n",
				tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}
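/*
 * The maxfire cap above bounds the work done in one interrupt: at most
 * 20 list entries per clock are examined.  When the cap is hit, the
 * head timer's expiry is written back to the expiration cache instead,
 * so the remaining queued timers simply fire on a later tick.
 */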
static void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     unsigned long long *expires,
			     unsigned long long cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr) {
			it->expires += it->incr;
			it->error += it->incr_error;
			if (it->error >= onecputick) {
				it->expires -= cputime_one_jiffy;
				it->error -= onecputick;
			}
		} else {
			it->expires = 0;
		}

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires)) {
		*expires = it->expires;
	}
}
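/*
 * The it->error/onecputick dance above compensates for rounding: the
 * itimer interval was converted to a whole number of jiffies, and
 * incr_error records what that rounding loses per period.  Once the
 * accumulated error reaches one tick's worth (onecputick, computed in
 * init_posix_cpu_timers()), one jiffy is shaved off the next expiry.
 */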
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	unsigned long long utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime_to_expires(cputime.utime);
	ptime = utime + cputime_to_expires(cputime.stime);
	sum_sched_runtime = cputime.sum_exec_runtime;
	maxfire = 20;
	prof_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || ptime < tl->expires) {
			prof_expires = tl->expires;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || utime < tl->expires) {
			virt_expires = tl->expires;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires) {
			sched_expires = tl->expires;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		cputime_t x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = secs_to_cputime(soft);
		if (!prof_expires || x < prof_expires) {
			prof_expires = x;
		}
	}

	sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
	sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	unsigned long long now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
		spin_lock(&p->sighand->siglock);
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		spin_lock(&p->sighand->siglock);
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	BUG_ON(!irqs_disabled());
	arm_timer(timer);
	spin_unlock(&p->sighand->siglock);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
					const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}
/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;
	cputime_t utime, stime;

	task_cputime(tsk, &utime, &stime);

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = utime,
			.stime = stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (sig->cputimer.running) {
		struct task_cputime group_sample;

		raw_spin_lock(&sig->cputimer.lock);
		group_sample = sig->cputimer.cputime;
		raw_spin_unlock(&sig->cputimer.lock);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	/*
	 * If there are any active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) cputimer must be running.
	 */
	if (tsk->signal->cputimer.running)
		check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}

	/*
	 * In case some timers were rescheduled after the queue got emptied,
	 * wake up full dynticks CPUs.
	 */
	if (tsk->signal->cputimer.running)
		posix_cpu_timer_kick_nohz();
}
/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	unsigned long long now;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = cputime_one_jiffy;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			goto out;
		*newval += now;
	}

	/*
	 * Update expiration cache if we are the earliest timer, or if the
	 * RLIMIT_CPU limit is earlier than the prof_exp cpu timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}
out:
	posix_cpu_timer_kick_nohz();
}
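/*
 * Example of the *oldval/*newval convention above (illustration only):
 * setitimer(ITIMER_PROF, ...) passes the new interval relative to now
 * and wants the remaining time of the previous itimer back.  If now is
 * 50 ticks of group CPU time, an old absolute expiry of 80 is returned
 * as 30 ticks remaining, and a new relative value of 20 is stored as
 * the absolute expiry 70.
 */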
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle case when timer was or is in the
			 * middle of firing. In other cases we already freed
			 * resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
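/*
 * Userspace reaches do_cpu_nanosleep() via clock_nanosleep(); a minimal
 * sketch (illustrative only) of a CPU-time sleep over the clocks this
 * file implements:
 *
 *	struct timespec ts = { .tv_sec = 1 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, &ts);
 *
 * The temporary k_itimer above never gets a sigqueue, which is exactly
 * the timer->sigq == NULL special case that cpu_timer_fire() handles by
 * waking this sleeping task.
 */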
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
		restart_block->nanosleep.rmtp = rmtp;
		restart_block->nanosleep.expires = timespec_to_ns(rqtp);
	}
	return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec t;
	struct itimerspec it;
	int error;

	t = ns_to_timespec(restart_block->nanosleep.expires);

	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->nanosleep.expires = timespec_to_ns(&t);
	}
	return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};
static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};
	struct timespec ts;

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);