#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get the old
 * or the new value with a side effect of accounting a slice of irq time to the
 * wrong task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;
void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}
#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
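/*
 * Note: on 32-bit kernels the two per-cpu u64 counters above cannot be read
 * atomically, so irq_time_seq is used by the irq_time_write_begin()/
 * irq_time_write_end() calls below (and by the reader used from
 * update_rq_clock()) to provide a consistent snapshot. On 64-bit the
 * seqcount is compiled out and plain loads/stores suffice.
 */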
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
        unsigned long flags;
        s64 delta;
        int cpu;

        if (!sched_clock_irqtime)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);

        irq_time_write_begin();
        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to the ksoftirqd thread
         * in that case, so as not to confuse the scheduler with a special task
         * that does not consume any time, but still wants to run.
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);

        irq_time_write_end();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
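/*
 * Note: irqtime_account_irq() is reached from the hardirq and softirq
 * entry/exit paths (via the account_irq_enter_time()/account_irq_exit_time()
 * wrappers), so the delta measured between two consecutive calls is exactly
 * the time spent in the previous context.
 */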
static int irqtime_account_hi_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_hardirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}
static int irqtime_account_si_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_softirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}
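/*
 * The two helpers above compare the per-cpu irq time accumulated by
 * irqtime_account_irq() with what has already been folded into kcpustat.
 * A nonzero return means there is freshly accumulated hardirq/softirq time,
 * which lets irqtime_account_process_tick() charge the tick to CPUTIME_IRQ
 * or CPUTIME_SOFTIRQ instead of to the interrupted task.
 */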
#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
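/*
 * With irq time accounting compiled out, sched_clock_irqtime is a constant
 * zero, so every "if (sched_clock_irqtime)" branch below is optimized away
 * and ticks are accounted through the plain paths only.
 */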
static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{
        /*
         * Since all updates are sure to touch the root cgroup, we
         * get ourselves ahead and touch it first. If the root cgroup
         * is the only cgroup, then nothing else should be necessary.
         */
        __this_cpu_add(kernel_cpustat.cpustat[index], tmp);

        cpuacct_account_field(p, index, tmp);
}
/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
                       cputime_t cputime_scaled)
{
        int index;

        /* Add user time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);

        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

        /* Add user time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for user time used */
        acct_account_cputime(p);
}
/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
                               cputime_t cputime_scaled)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        /* Add guest time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
        p->gtime += cputime;

        /* Add guest time to cpustat. */
        if (task_nice(p) > 0) {
                cpustat[CPUTIME_NICE] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
        } else {
                cpustat[CPUTIME_USER] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST] += (__force u64) cputime;
        }
}
/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
                           cputime_t cputime_scaled, int index)
{
        /* Add system time to process. */
        p->stime += cputime;
        p->stimescaled += cputime_scaled;
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for system time used */
        acct_account_cputime(p);
}
/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
                         cputime_t cputime, cputime_t cputime_scaled)
{
        int index;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime, cputime_scaled);
                return;
        }

        if (hardirq_count() - hardirq_offset)
                index = CPUTIME_IRQ;
        else if (in_serving_softirq())
                index = CPUTIME_SOFTIRQ;
        else
                index = CPUTIME_SYSTEM;

        __account_system_time(p, cputime, cputime_scaled, index);
}
/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}
/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();

        if (atomic_read(&rq->nr_iowait) > 0)
                cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
        else
                cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal;
                unsigned long steal_jiffies;

                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;

                /*
                 * steal is in nsecs but our caller is expecting steal
                 * time in jiffies. Let's cast the result to jiffies
                 * granularity and account the rest on the next rounds.
                 */
                steal_jiffies = nsecs_to_jiffies(steal);
                this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);

                account_steal_time(jiffies_to_cputime(steal_jiffies));
                return steal_jiffies;
        }
#endif
        return false;
}
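/*
 * Illustrative example (assuming HZ=1000): a pending steal delta of 2.7ms
 * yields steal_jiffies = 2; only those two jiffies are added to
 * prev_steal_time, so the remaining 0.7ms stays pending and is accounted
 * once it has grown to a full jiffy on a later tick.
 */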
/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        cputime_t utime, stime;
        struct task_struct *t;
        unsigned int seq, nextseq;
        unsigned long flags;

        rcu_read_lock();
        /* Attempt a lockless read on the first round. */
        nextseq = 0;
        do {
                seq = nextseq;
                flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
                times->utime = sig->utime;
                times->stime = sig->stime;
                times->sum_exec_runtime = sig->sum_sched_runtime;

                for_each_thread(tsk, t) {
                        task_cputime(t, &utime, &stime);
                        times->utime += utime;
                        times->stime += stime;
                        times->sum_exec_runtime += task_sched_runtime(t);
                }
                /* If lockless access failed, take the lock. */
                nextseq = 1;
        } while (need_seqretry(&sig->stats_lock, seq));
        done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
        rcu_read_unlock();
}
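/*
 * Note on the loop above: the first round reads sig->stats_lock as a plain
 * lockless seqbegin. Only if a writer raced with us does need_seqretry()
 * request a second round, which then takes the lock, so the number of
 * retries is bounded.
 */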
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time as there is
 * no timer going off while we are in hardirq, and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
{
        cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
        u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        if (steal_account_process_tick())
                return;

        cputime *= ticks;
        scaled *= ticks;

        if (irqtime_account_hi_update()) {
                cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
                cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime, scaled);
        } else {
                __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
}
static void irqtime_account_idle_ticks(int ticks)
{
        struct rq *rq = this_rq();

        irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        vtime_account_user(prev);
#endif
        arch_vtime_task_switch(prev);
}
#endif
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have another meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
        if (!in_interrupt()) {
                /*
                 * If we interrupted user, context_tracking_in_user()
                 * is 1 because the context tracking doesn't hook
                 * on irq entry/exit. This way we know if
                 * we need to flush user time on kernel entry.
                 */
                if (context_tracking_in_user()) {
                        vtime_account_user(tsk);
                        return;
                }

                if (is_idle_task(tsk)) {
                        vtime_account_idle(tsk);
                        return;
                }
        }
        vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        *ut = p->utime;
        *st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();

        if (vtime_accounting_cpu_enabled())
                return;

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }

        if (steal_account_process_tick())
                return;

        if (user_tick)
                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                    one_jiffy_scaled);
        else
                account_idle_time(cputime_one_jiffy);
}
/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
        account_steal_time(jiffies_to_cputime(ticks));
}
/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        account_idle_time(jiffies_to_cputime(ticks));
}
/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
        u64 scaled;

        for (;;) {
                /* Make sure "rtime" is the bigger of stime/rtime */
                if (stime > rtime)
                        swap(rtime, stime);

                /* Make sure 'total' fits in 32 bits */
                if (total >> 32)
                        goto drop_precision;

                /* Does rtime (and thus stime) fit in 32 bits? */
                if (!(rtime >> 32))
                        break;

                /* Can we just balance rtime/stime rather than dropping bits? */
                if (stime >> 31)
                        goto drop_precision;

                /* We can grow stime and shrink rtime and try to make them both fit */
                stime <<= 1;
                rtime >>= 1;
                continue;

drop_precision:
                /* We drop from rtime, it has more bits than stime */
                rtime >>= 1;
                total >>= 1;
        }

        /*
         * Make sure gcc understands that this is a 32x32->64 multiply,
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
        return (__force cputime_t) scaled;
}
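/*
 * Illustrative example: with stime = 2 and utime = 6 (total = 8) and a
 * precise rtime = 16, scale_stime() returns 2 * 16 / 8 = 4, i.e. the
 * tick-based stime/total ratio applied to the scheduler-accounted runtime.
 */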
/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over- or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
                           struct prev_cputime *prev,
                           cputime_t *ut, cputime_t *st)
{
        cputime_t rtime, stime, utime;
        unsigned long flags;

        /* Serialize concurrent callers such that we can honour our guarantees */
        raw_spin_lock_irqsave(&prev->lock, flags);
        rtime = nsecs_to_cputime(curr->sum_exec_runtime);

        /*
         * This is possible under two circumstances:
         *  - rtime isn't monotonic after all (a bug);
         *  - we got reordered by the lock.
         *
         * In both cases this acts as a filter such that the rest of the code
         * can assume it is monotonic regardless of anything else.
         */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        stime = curr->stime;
        utime = curr->utime;

        if (utime == 0) {
                stime = rtime;
                goto update;
        }

        if (stime == 0) {
                utime = rtime;
                goto update;
        }

        stime = scale_stime((__force u64)stime, (__force u64)rtime,
                            (__force u64)(stime + utime));

        /*
         * Make sure stime doesn't go backwards; this preserves monotonicity
         * for utime because rtime is monotonic.
         *
         *  utime_i+1 = rtime_i+1 - stime_i
         *            = rtime_i+1 - (rtime_i - utime_i)
         *            = (rtime_i+1 - rtime_i) + utime_i
         *            >= utime_i
         */
        if (stime < prev->stime)
                stime = prev->stime;
        utime = rtime - stime;

        /*
         * Make sure utime doesn't go backwards; this still preserves
         * monotonicity for stime, analogous argument to above.
         */
        if (utime < prev->utime) {
                utime = prev->utime;
                stime = rtime - utime;
        }

update:
        prev->stime = stime;
        prev->utime = utime;
out:
        *ut = prev->utime;
        *st = prev->stime;
        raw_spin_unlock_irqrestore(&prev->lock, flags);
}
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };

        task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static cputime_t vtime_delta(struct task_struct *tsk)
{
        unsigned long now = READ_ONCE(jiffies);

        if (time_before(now, (unsigned long)tsk->vtime_snap))
                return 0;

        return jiffies_to_cputime(now - tsk->vtime_snap);
}
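/*
 * Unlike vtime_delta() above, get_vtime_delta() below also consumes the
 * pending time: it advances tsk->vtime_snap to "now", so each jiffy is
 * accounted at most once.
 */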
static cputime_t get_vtime_delta(struct task_struct *tsk)
{
        unsigned long now = READ_ONCE(jiffies);
        unsigned long delta = now - tsk->vtime_snap;

        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
        tsk->vtime_snap = now;

        return jiffies_to_cputime(delta);
}
static void __vtime_account_system(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}
void vtime_account_system(struct task_struct *tsk)
{
        if (!vtime_delta(tsk))
                return;

        write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
        write_seqcount_begin(&tsk->vtime_seqcount);
        if (vtime_delta(tsk))
                __vtime_account_system(tsk);
        if (context_tracking_in_user())
                tsk->vtime_snap_whence = VTIME_USER;
        write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_account_user(struct task_struct *tsk)
{
        cputime_t delta_cpu;

        write_seqcount_begin(&tsk->vtime_seqcount);
        tsk->vtime_snap_whence = VTIME_SYS;
        if (vtime_delta(tsk)) {
                delta_cpu = get_vtime_delta(tsk);
                account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
        }
        write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_user_enter(struct task_struct *tsk)
{
        write_seqcount_begin(&tsk->vtime_seqcount);
        if (vtime_delta(tsk))
                __vtime_account_system(tsk);
        tsk->vtime_snap_whence = VTIME_USER;
        write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_guest_enter(struct task_struct *tsk)
{
        /*
         * The flags must be updated under the lock with
         * the vtime_snap flush and update.
         * That enforces the right ordering and update sequence
         * synchronization against the reader (task_gtime())
         * that can thus safely catch up with a tickless delta.
         */
        write_seqcount_begin(&tsk->vtime_seqcount);
        if (vtime_delta(tsk))
                __vtime_account_system(tsk);
        current->flags |= PF_VCPU;
        write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
        write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        current->flags &= ~PF_VCPU;
        write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_idle_time(delta_cpu);
}
void arch_vtime_task_switch(struct task_struct *prev)
{
        write_seqcount_begin(&prev->vtime_seqcount);
        prev->vtime_snap_whence = VTIME_INACTIVE;
        write_seqcount_end(&prev->vtime_seqcount);

        write_seqcount_begin(&current->vtime_seqcount);
        current->vtime_snap_whence = VTIME_SYS;
        current->vtime_snap = jiffies;
        write_seqcount_end(&current->vtime_seqcount);
}
void vtime_init_idle(struct task_struct *t, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        write_seqcount_begin(&t->vtime_seqcount);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = jiffies;
        write_seqcount_end(&t->vtime_seqcount);
        local_irq_restore(flags);
}
cputime_t task_gtime(struct task_struct *t)
{
        unsigned int seq;
        cputime_t gtime;

        if (!vtime_accounting_enabled())
                return t->gtime;

        do {
                seq = read_seqcount_begin(&t->vtime_seqcount);

                gtime = t->gtime;
                if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
                        gtime += vtime_delta(t);

        } while (read_seqcount_retry(&t->vtime_seqcount, seq));

        return gtime;
}
/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
                   cputime_t *u_dst, cputime_t *s_dst,
                   cputime_t *u_src, cputime_t *s_src,
                   cputime_t *udelta, cputime_t *sdelta)
{
        unsigned int seq;
        unsigned long long delta;

        do {
                *udelta = 0;
                *sdelta = 0;

                seq = read_seqcount_begin(&t->vtime_seqcount);

                if (u_dst)
                        *u_dst = *u_src;
                if (s_dst)
                        *s_dst = *s_src;

                /* Task is sleeping, nothing to add */
                if (t->vtime_snap_whence == VTIME_INACTIVE ||
                    is_idle_task(t))
                        continue;

                delta = vtime_delta(t);

                /*
                 * Task runs either in user or kernel space, add pending nohz
                 * time to the right place.
                 */
                if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
                        *udelta = delta;
                } else {
                        if (t->vtime_snap_whence == VTIME_SYS)
                                *sdelta = delta;
                }
        } while (read_seqcount_retry(&t->vtime_seqcount, seq));
}
void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
        cputime_t udelta, sdelta;

        if (!vtime_accounting_enabled()) {
                if (utime)
                        *utime = t->utime;
                if (stime)
                        *stime = t->stime;
                return;
        }

        fetch_task_cputime(t, utime, stime, &t->utime,
                           &t->stime, &udelta, &sdelta);
        if (utime)
                *utime += udelta;
        if (stime)
                *stime += sdelta;
}
void task_cputime_scaled(struct task_struct *t,
                         cputime_t *utimescaled, cputime_t *stimescaled)
{
        cputime_t udelta, sdelta;

        if (!vtime_accounting_enabled()) {
                if (utimescaled)
                        *utimescaled = t->utimescaled;
                if (stimescaled)
                        *stimescaled = t->stimescaled;
                return;
        }

        fetch_task_cputime(t, utimescaled, stimescaled,
                           &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
        if (utimescaled)
                *utimescaled += cputime_to_scaled(udelta);
        if (stimescaled)
                *stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */