/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>

/*
 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
        cputime_t cputime;

        cputime = secs_to_cputime(rlim_new);
        if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
            cputime_lt(current->signal->it_prof_expires, cputime)) {
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
        }
}

static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                    same_thread_group(p, current) : thread_group_leader(p))) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}

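/*
 * For reference (see the CPUCLOCK_* macros in <linux/posix-timers.h>):
 * a CPU clockid packs a PID and a clock type into one negative integer,
 * roughly
 *
 *      clockid = (~pid << 3) | per-thread bit (4) | clock type (0..2)
 *
 * so CPUCLOCK_PID() recovers the PID, CPUCLOCK_PERTHREAD() the thread
 * vs. process flavor, and CPUCLOCK_WHICH() one of CPUCLOCK_PROF,
 * CPUCLOCK_VIRT or CPUCLOCK_SCHED.  A pid of 0 means the caller itself.
 */
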
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(cpu.sched);
        else
                cputime_to_timespec(cpu.cpu, tp);
}

static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}
static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}

/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = cputime_div(time, div);

        return max_t(cputime_t, res, 1);
}

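/*
 * Example: splitting one time unit of slack across four timers would
 * round down to zero and re-arm a timer for "now"; with this helper
 * cputime_div_non_zero(1, 4) == 1, so the computed expiry always lands
 * at least one unit in the future.
 */
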
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}

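/*
 * Worked example of the doubling scheme above: expires = 10, incr = 3,
 * now = 18, so delta = 18 + 3 - 10 = 11.  The first loop doubles incr
 * to 6 (i = 1); the second walks back down, adding each power-of-two
 * multiple of the increment that still fits: +6 (it_overrun += 2,
 * delta = 5), then +3 (it_overrun += 1, delta = 2).  Net effect:
 * expires = 19 > now and the overrun count grew by 3, computed in
 * O(log(delta/incr)) steps rather than one step per missed period.
 */
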
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}

int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}

void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct sighand_struct *sighand;
        struct signal_struct *sig;
        struct task_struct *t;

        *times = INIT_CPUTIME;

        rcu_read_lock();
        sighand = rcu_dereference(tsk->sighand);
        if (!sighand)
                goto out;

        sig = tsk->signal;

        t = tsk;
        do {
                times->utime = cputime_add(times->utime, t->utime);
                times->stime = cputime_add(times->stime, t->stime);
                times->sum_exec_runtime += t->se.sum_exec_runtime;

                t = next_thread(t);
        } while (t != tsk);

        times->utime = cputime_add(times->utime, sig->utime);
        times->stime = cputime_add(times->stime, sig->stime);
        times->sum_exec_runtime += sig->sum_sched_runtime;
out:
        rcu_read_unlock();
}

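/*
 * Note: sig->utime, sig->stime and sig->sum_sched_runtime accumulate
 * the CPU time of threads that have already exited, so the do/while
 * loop over the live threads plus those totals covers the whole
 * group's history.
 */
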
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}

int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (thread_group_leader(p) && p->signal) {
                                        error = cpu_clock_sample_group(which_clock,
                                                                       p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}

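/*
 * Illustration (user-space view, not part of this file): the function
 * above is what services clock_gettime(2) on CPU clocks, e.g.
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * samples the calling thread (pid 0 in the encoding described earlier),
 * while clock_getcpuclockid(3) and pthread_getcpuclockid(3) construct
 * clockids that name other processes or threads.
 */
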
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !thread_group_leader(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sum_exec_runtime;
                }
        }
}

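/*
 * In cleanup_timers() above, head initially points at the CPUCLOCK_PROF
 * list, and the two ++head steps advance to the CPUCLOCK_VIRT and
 * CPUCLOCK_SCHED lists, matching the clock type values 0, 1 and 2 used
 * to index the cpu_timers[] arrays.
 */
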
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        struct task_cputime cputime;

        thread_group_cputime(tsk, &cputime);
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}

/*
 * Enable the process wide cpu timer accounting.
 *
 * serialized using ->sighand->siglock
 */
static void start_process_timers(struct task_struct *tsk)
{
        tsk->signal->cputimer.running = 1;
        barrier();
}

/*
 * Release the process wide timer accounting -- timer stops ticking when
 * nobody cares about it.
 *
 * serialized using ->sighand->siglock
 */
static void stop_process_timers(struct task_struct *tsk)
{
        tsk->signal->cputimer.running = 0;
        barrier();
}

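/*
 * The running flag is what the per-tick accounting hooks (the
 * account_group_*() helpers) check before touching the shared
 * thread-group cputime, so a process with no armed process-wide
 * timers pays only a flag test per tick.
 */
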
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        if (!CPUCLOCK_PERTHREAD(timer->it_clock))
                start_process_timers(p);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched)
                                break;
                        listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
                        listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */

                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (cputime_eq(p->cputime_expires.prof_exp,
                                               cputime_zero) ||
                                    cputime_gt(p->cputime_expires.prof_exp,
                                               nt->expires.cpu))
                                        p->cputime_expires.prof_exp =
                                                nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (cputime_eq(p->cputime_expires.virt_exp,
                                               cputime_zero) ||
                                    cputime_gt(p->cputime_expires.virt_exp,
                                               nt->expires.cpu))
                                        p->cputime_expires.virt_exp =
                                                nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->cputime_expires.sched_exp == 0 ||
                                    p->cputime_expires.sched_exp >
                                                        nt->expires.sched)
                                        p->cputime_expires.sched_exp =
                                                nt->expires.sched;
                                break;
                        }
                } else {
                        /*
                         * For a process timer, set the cached expiration time.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (!cputime_eq(p->signal->it_virt_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                p->signal->cputime_expires.virt_exp =
                                        timer->it.cpu.expires.cpu;
                                break;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_prof_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
                                p->signal->cputime_expires.prof_exp =
                                        timer->it.cpu.expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                p->signal->cputime_expires.sched_exp =
                                        timer->it.cpu.expires.sched;
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}

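/*
 * arm_timer() keeps each list sorted by expiry, so the expiration scans
 * in check_thread_timers()/check_process_timers() below only ever need
 * to look at the head of a list to know whether anything has fired.
 */
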
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current time to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_clock_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}

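/*
 * Illustration (user-space view, not part of this file): the path above
 * runs for a POSIX interval timer on a CPU clock, e.g.
 *
 *      timer_t tid;
 *      struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *                              .sigev_signo  = SIGPROF };
 *      struct itimerspec its = { .it_value    = { 2, 0 },
 *                                .it_interval = { 1, 0 } };
 *      timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *      timer_settime(tid, 0, &its, NULL);
 *
 * which arms a process-wide timer that fires after 2 seconds of CPU
 * time and then after each further second of CPU time.
 */
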
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_clock_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;

        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.virt_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case thread timers.
         */
        if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
                unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
                unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (sig->rlim[RLIMIT_RTTIME].rlim_cur
                            < sig->rlim[RLIMIT_RTTIME].rlim_max) {
                                sig->rlim[RLIMIT_RTTIME].rlim_cur +=
                                                                USEC_PER_SEC;
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
}

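/*
 * The maxfire limit of 20 in each loop of check_thread_timers() above
 * bounds how many timers a single tick will move to the firing list.
 * If more than 20 are due, the cached expiry field is left set to the
 * 20th timer's expiry, so the fast path still sees an expired value
 * and the next tick picks up the remainder.  The same bound appears in
 * check_process_timers() below.
 */
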
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.
 * Per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;

        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
            cputime_eq(sig->it_prof_expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED])) {
                stop_process_timers(tsk);
                return;
        }

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime.utime;
        ptime = cputime_add(utime, cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;
        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
                        prof_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
                        virt_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
                        sched_expires = tl->expires.sched;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                if (cputime_ge(ptime, sig->it_prof_expires)) {
                        /* ITIMER_PROF fires and reloads.  */
                        sig->it_prof_expires = sig->it_prof_incr;
                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                                sig->it_prof_expires = cputime_add(
                                        sig->it_prof_expires, ptime);
                        }
                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
                    (cputime_eq(prof_expires, cputime_zero) ||
                     cputime_lt(sig->it_prof_expires, prof_expires))) {
                        prof_expires = sig->it_prof_expires;
                }
        }
        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                if (cputime_ge(utime, sig->it_virt_expires)) {
                        /* ITIMER_VIRTUAL fires and reloads.  */
                        sig->it_virt_expires = sig->it_virt_incr;
                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                                sig->it_virt_expires = cputime_add(
                                        sig->it_virt_expires, utime);
                        }
                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
                    (cputime_eq(virt_expires, cputime_zero) ||
                     cputime_lt(sig->it_virt_expires, virt_expires))) {
                        virt_expires = sig->it_virt_expires;
                }
        }
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (sig->rlim[RLIMIT_CPU].rlim_cur
                            < sig->rlim[RLIMIT_CPU].rlim_max) {
                                sig->rlim[RLIMIT_CPU].rlim_cur++;
                        }
                }
                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        if (!cputime_eq(prof_expires, cputime_zero) &&
            (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
             cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
                sig->cputime_expires.prof_exp = prof_expires;
        if (!cputime_eq(virt_expires, cputime_zero) &&
            (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
             cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
                sig->cputime_expires.virt_exp = virt_expires;
        if (sched_expires != 0 &&
            (sig->cputime_expires.sched_exp == 0 ||
             sig->cputime_expires.sched_exp > sched_expires))
                sig->cputime_expires.sched_exp = sched_expires;
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, now);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:    The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (cputime_eq(cputime->utime, cputime_zero) &&
            cputime_eq(cputime->stime, cputime_zero) &&
            cputime->sum_exec_runtime == 0)
                return 1;
        return 0;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:     The task_cputime structure to be checked for expiration.
 * @expires:    Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (!cputime_eq(expires->utime, cputime_zero) &&
            cputime_ge(sample->utime, expires->utime))
                return 1;
        if (!cputime_eq(expires->stime, cputime_zero) &&
            cputime_ge(cputime_add(sample->utime, sample->stime),
                       expires->stime))
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:        The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        /* tsk == current, ensure it is safe to use ->signal/sighand */
        if (unlikely(tsk->exit_state))
                return 0;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample = {
                        .utime = tsk->utime,
                        .stime = tsk->stime,
                        .sum_exec_runtime = tsk->se.sum_exec_runtime
                };

                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        if (!task_cputime_zero(&sig->cputime_expires)) {
                struct task_cputime group_sample;

                thread_group_cputimer(tsk, &group_sample);
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }
        return 0;
}

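/*
 * fastpath_timer_check() runs on every tick with interrupts disabled
 * but no locks taken: the cputime_expires caches are read racily,
 * which is tolerable because a stale read at worst sends us into the
 * locked slow path below (a harmless false positive) or defers a
 * genuine expiry to the next tick.
 */
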
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        spin_lock(&tsk->sighand->siglock);
        /*
         * Here we take off tsk->signal->cpu_timers[N] and
         * tsk->cpu_timers[N] all the timers that are firing, and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        spin_unlock(&tsk->sighand->siglock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int firing;
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}

/*
 * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
 * is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        start_process_timers(tsk);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire.  */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                switch (clock_idx) {
                case CPUCLOCK_PROF:
                        tsk->signal->cputime_expires.prof_exp = *newval;
                        break;
                case CPUCLOCK_VIRT:
                        tsk->signal->cputime_expires.virt_exp = *newval;
                        break;
                }
        }
}

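/*
 * Illustration (not part of this file): setitimer(2) reaches
 * set_process_cpu_timer() for the CPU-based interval timers, e.g.
 *
 *      struct itimerval itv = { .it_value = { 5, 0 } };
 *      setitimer(ITIMER_PROF, &itv, NULL);
 *
 * becomes set_process_cpu_timer(current, CPUCLOCK_PROF, ...), just as
 * update_rlimit_cpu() at the top of this file does for RLIMIT_CPU.
 */
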
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

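/*
 * Illustration (not part of this file): do_cpu_nanosleep() is the
 * sleeper behind clock_nanosleep(2) on the process CPU clock, e.g.
 *
 *      struct timespec ts = { 0, 100000000 };
 *      clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 *
 * blocks until the process has consumed a further 100ms of CPU time.
 * Sleeping on the caller's own thread clock is rejected (see the
 * checks below and thread_cpu_nsleep()), since a thread blocked on
 * its own CPU clock would never accumulate the time to wake up.
 */
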
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
                &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;
        }
        return error;
}

long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;
        struct itimerspec it;
        int error;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;

        restart_block->fn = do_no_restart_syscall;
        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = t.tv_sec;
                restart_block->arg3 = t.tv_nsec;
        }
        return error;
}

#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp, struct timespec __user *rmtp)
{
        return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
                .nsleep_restart = thread_cpu_nsleep_restart,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);