#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
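
/*
 * Illustrative usage (a sketch, not part of this header): a scheduling
 * class holding the runqueue lock can update its statistics through
 * these macros, and every call site compiles away to nothing when
 * CONFIG_SCHEDSTATS is off.  The field names below are assumptions
 * made for the example only:
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_add(rq, rq_sched_info.run_delay, delta);
 *	schedstat_set(se->statistics.wait_start, rq->clock);
 */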
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus: the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}
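
/*
 * A worked example of the skew argument above (numbers invented for
 * illustration): a task is queued on CPU0 at CPU0-clock 100, dequeued
 * for migration at CPU0-clock 130, re-queued on CPU1 at CPU1-clock 530,
 * and finally runs at CPU1-clock 550.  run_delay accumulates
 * (130 - 100) + (550 - 530) = 50: each delta subtracts timestamps taken
 * from a single cpu's rq->clock, so a constant offset between the two
 * clocks (here 400) never enters the sum.
 */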

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
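
/*
 * Call-site sketch (assumed shape of the scheduler's context-switch
 * path, shown for orientation only): this hook is driven from
 * prepare_task_switch(), which the core scheduler runs once per switch
 * with prev != next already guaranteed:
 *
 *	static inline void
 *	prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *			    struct task_struct *next)
 *	{
 *		sched_info_switch(prev, next);
 *		...
 *	}
 */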
#else
#define sched_info_queued(t)			do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(t)			do { } while (0)
#define sched_info_switch(t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If the thread group's CPU timer is running, update the utime field of
 * the group's shared thread_group_cputime structure under the cputimer
 * lock.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}
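
/*
 * Call-site sketch (a simplified, assumed view of the tick path): the
 * per-task and per-group user time are advanced together, e.g. from
 * account_user_time() in the cputime accounting code:
 *
 *	void account_user_time(struct task_struct *p, cputime_t cputime,
 *			       cputime_t cputime_scaled)
 *	{
 *		p->utime += cputime;
 *		account_group_user_time(p, cputime);
 *		...
 *	}
 */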

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If the thread group's CPU timer is running, update the stime field of
 * the group's shared thread_group_cputime structure under the cputimer
 * lock.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If the thread group's CPU timer is running, update the sum_exec_runtime
 * field of the group's shared thread_group_cputime structure under the
 * cputimer lock.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}
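
/*
 * Call-site sketch (assumed, simplified): sum_exec_runtime is typically
 * fed from the fair-class update_curr() path whenever the running
 * entity's runtime is advanced:
 *
 *	static void update_curr(struct cfs_rq *cfs_rq)
 *	{
 *		...
 *		if (entity_is_task(curr)) {
 *			struct task_struct *curtask = task_of(curr);
 *
 *			account_group_exec_runtime(curtask, delta_exec);
 *		}
 *	}
 */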