#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
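
/*
 * Why the seqcount: on 32-bit kernels a u64 load can tear, so writers are
 * expected to bump irq_time_seq around updates of cpu_hardirq_time and
 * cpu_softirq_time, and readers retry until they see a stable pair.  The
 * irq_time_write_begin()/irq_time_write_end() and irq_time_read() helpers
 * that do this are assumed to live in kernel/sched/sched.h in this tree;
 * on 64-bit the loads are already atomic and the seqcount is not needed.
 */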

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);

static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (TASK_NICE(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;

	times->utime = sig->utime;
	times->stime = sig->stime;
	times->sum_exec_runtime = sig->sum_sched_runtime;

	rcu_read_lock();
	/* make sure we can trust tsk->thread_group list */
	if (!likely(pid_alive(tsk)))
		goto out;

	t = tsk;
	do {
		task_cputime(t, &utime, &stime);
		times->utime += utime;
		times->stime += stime;
		times->sum_exec_runtime += task_sched_runtime(t);
	} while_each_thread(tsk, t);
out:
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime_one_jiffy);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else {
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	int i;
	struct rq *rq = this_rq();

	for (i = 0; i < ticks; i++)
		irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_enabled())
		return;

	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * give the idle time another meaning (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because context tracking doesn't hook
		 * on irq entry/exit. This way we know if
		 * we need to flush user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}

/*
 * Perform (stime * rtime) / total with reduced chances
 * of multiplication overflows by using smaller factors
 * like quotient and remainders of divisions between
 * stime and total.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 rem, res, scaled;

	if (rtime >= total) {
		/*
		 * Scale up to rtime / total then add
		 * the remainder scaled to stime / total.
		 */
		res = div64_u64_rem(rtime, total, &rem);
		scaled = stime * res;
		scaled += div64_u64(stime * rem, total);
	} else {
		/*
		 * Same in reverse: scale down to total / rtime
		 * then subtract that result scaled to
		 * the remaining part.
		 */
		res = div64_u64_rem(total, rtime, &rem);
		scaled = div64_u64(stime, res);
		scaled -= div64_u64(scaled * rem, total);
	}

	return (__force cputime_t) scaled;
}
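
/*
 * Worked example of the scaling above (illustrative numbers, not from any
 * particular workload): with stime = 3, rtime = 10, total = 4 we want
 * 3 * 10 / 4 = 7 (truncated from 7.5).  Since rtime >= total:
 *
 *   res = 10 / 4 = 2, rem = 2
 *   scaled  = 3 * 2        = 6	(stime * quotient)
 *   scaled += (3 * 2) / 4  = 1	(stime * remainder / total)
 *   scaled  = 7
 *
 * The point of splitting rtime into quotient and remainder is that
 * stime * res and stime * rem are much smaller than stime * rtime, so the
 * intermediate products are far less likely to overflow 64 bits.
 */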

/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, total;

	if (vtime_accounting_enabled()) {
		*ut = curr->utime;
		*st = curr->stime;
		return;
	}

	stime = curr->stime;
	total = stime + curr->utime;

	/*
	 * Tick based cputime accounting depends on random scheduling
	 * timeslices of a task to be interrupted or not by the timer.
	 * Depending on these circumstances, the number of these interrupts
	 * may be over- or under-estimated, matching the real user and system
	 * cputime with a variable precision.
	 *
	 * Fix this by scaling these tick based values against the total
	 * runtime accounted by the CFS scheduler.
	 */
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	if (!rtime) {
		stime = 0;
	} else if (!total) {
		stime = rtime;
	} else {
		stime = scale_stime((__force u64)stime,
				    (__force u64)rtime, (__force u64)total);
	}

	/*
	 * If the tick based count grows faster than the scheduler one,
	 * the result of the scaling may go backward.
	 * Let's enforce monotonicity.
	 */
	prev->stime = max(prev->stime, stime);
	prev->utime = max(prev->utime, rtime - prev->stime);

	*ut = prev->utime;
	*st = prev->stime;
}
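
/*
 * Example of why the max() clamps matter (made-up numbers): suppose a
 * previous call reported prev->stime = 100 and prev->utime = 50.  On the
 * next call rtime = 160 but, because of scaling inaccuracy, stime now comes
 * out as 90.  Without the clamps the stime reported to userspace would
 * appear to go from 100 back to 90; with them we keep prev->stime =
 * max(100, 90) = 100 and set prev->utime = max(50, 160 - 100) = 60, so both
 * values userspace sees only ever move forward.
 */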

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_irq_exit(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	if (!vtime_accounting_enabled())
		return;

	delta_cpu = get_vtime_delta(tsk);

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

bool vtime_accounting_enabled(void)
{
	return context_tracking_active();
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock();
	write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock();
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */