#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
# define schedstat_inc(rq, field)	do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
# define schedstat_add(rq, field, amt)	do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
# define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
# define schedstat_val(rq, field)	((schedstat_enabled()) ? (rq)->field : 0)
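
/*
 * Illustrative only (not part of this header): typical schedstat usage
 * at call sites such as kernel/sched/core.c and kernel/sched/fair.c
 * looks like:
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_add(cfs_rq, exec_clock, delta_exec);
 *
 * With CONFIG_SCHEDSTATS=y these compile to updates guarded by the
 * sched_schedstats static branch; otherwise they compile away entirely.
 */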

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_enabled()		0
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
# define schedstat_val(rq, field)	0
#endif

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus; the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the currently running process
 * involuntarily, typically because its time slice expired (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically because they
 * expired their time slice. (This may also be called when switching to
 * or from the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
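
/*
 * Sketch of how these hooks fit together (call sites live in
 * kernel/sched/core.c; exact placement may differ across kernel
 * versions):
 *
 *	enqueue_task()  -> sched_info_queued()    stamp last_queued
 *	context switch  -> sched_info_switch()    arrive on / depart from cpu
 *	dequeue_task()  -> sched_info_dequeued()  fold wait time into run_delay
 *
 * run_delay therefore accumulates time spent runnable but not running.
 */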
#else
#define sched_info_queued(rq, t)	do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)	do { } while (0)
#define sched_info_depart(rq, t)	do { } while (0)
#define sched_info_arrive(rq, next)	do { } while (0)
#define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account further cputime consumed by
	 * that task to the signal struct, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
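
/*
 * Illustrative only: these helpers are invoked from the tick-time
 * accounting paths, e.g. account_user_time() and account_system_time()
 * in kernel/sched/cputime.c and update_curr() in kernel/sched/fair.c
 * (exact call sites vary by kernel version):
 *
 *	account_group_user_time(p, cputime);
 *	account_group_exec_runtime(curtask, delta_exec);
 *
 * The atomic64_add() updates let concurrent ticks on different CPUs
 * account into the shared thread_group_cputimer without taking a lock.
 */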