#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
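
/*
 * Usage sketch (illustrative, not part of this header): callers pass a
 * runqueue (or another stats-bearing object) and a field name, e.g.
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
 *
 * Under !CONFIG_SCHEDSTATS these expand to empty statements, so the
 * arguments must remain free of side effects.
 */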
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = rq_clock(task_rq(t)), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}
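
/*
 * Worked example (illustrative): a task queued on CPU0 at rq_clock 100 and
 * dequeued for migration at rq_clock 150 contributes delta = 50 from CPU0;
 * re-queued on CPU1 at rq_clock 900 (a skewed clock) and run at 930, it
 * contributes delta = 30 there. Each delta is computed against a single
 * rq->clock, so the inter-CPU skew never enters run_delay.
 */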

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = rq_clock(task_rq(t)), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(task_rq(t));
}
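
/*
 * Call-site sketch (illustrative): enqueue_task() invokes sched_info_queued()
 * just before the scheduling class's enqueue hook, so only the first of
 * several queuings stamps last_queued until sched_info_dequeued() or
 * sched_info_arrive() clears it again.
 */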

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = rq_clock(task_rq(t)) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}

static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
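
/*
 * Call-site sketch (illustrative): the core context-switch path calls this
 * once per switch, e.g. from prepare_task_switch():
 *
 *	prepare_task_switch(rq, prev, next)
 *		-> sched_info_switch(prev, next)
 *
 * which charges prev's departure and next's wait time in one place.
 */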
#else
#define sched_info_queued(t)			do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(t)			do { } while (0)
#define sched_info_switch(t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we no longer account further cputime consumed by
	 * that task to the signal struct, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() for any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}