kernel/sched/deadline.c
1 /*
2 * Deadline Scheduling Class (SCHED_DEADLINE)
3 *
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5 *
6 * Tasks that periodically execute their instances for less than their
7 * runtime won't miss any of their deadlines.
8 * Tasks that are not periodic or sporadic or that try to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
11 *
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
16 */
17 #include "sched.h"
18
19 #include <linux/slab.h>
20
21 struct dl_bandwidth def_dl_bandwidth;
22
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24 {
25 return container_of(dl_se, struct task_struct, dl);
26 }
27
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29 {
30 return container_of(dl_rq, struct rq, dl);
31 }
32
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34 {
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39 }
40
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42 {
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44 }
45
46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47 {
48 struct sched_dl_entity *dl_se = &p->dl;
49
50 return dl_rq->rb_leftmost == &dl_se->rb_node;
51 }
52
53 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54 {
55 raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 dl_b->dl_period = period;
57 dl_b->dl_runtime = runtime;
58 }
59
60 void init_dl_bw(struct dl_bw *dl_b)
61 {
62 raw_spin_lock_init(&dl_b->lock);
63 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 if (global_rt_runtime() == RUNTIME_INF)
65 dl_b->bw = -1;
66 else
67 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 dl_b->total_bw = 0;
70 }
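/*
 * A quick illustration (numbers are made up, not taken from the source):
 * with the default rt_period/rt_runtime of 1s/950ms, and assuming
 * to_ratio() uses a 20-bit fixed-point scale, dl_b->bw ends up as
 * roughly 0.95 * 2^20 ~= 996147, i.e. -deadline tasks may claim at
 * most 95% of the CPU bandwidth on this domain.
 */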
71
72 void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
73 {
74 dl_rq->rb_root = RB_ROOT;
75
76 #ifdef CONFIG_SMP
77 /* zero means no -deadline tasks */
78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79
80 dl_rq->dl_nr_migratory = 0;
81 dl_rq->overloaded = 0;
82 dl_rq->pushable_dl_tasks_root = RB_ROOT;
83 #else
84 init_dl_bw(&dl_rq->dl_bw);
85 #endif
86 }
87
88 #ifdef CONFIG_SMP
89
90 static inline int dl_overloaded(struct rq *rq)
91 {
92 return atomic_read(&rq->rd->dlo_count);
93 }
94
95 static inline void dl_set_overload(struct rq *rq)
96 {
97 if (!rq->online)
98 return;
99
100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101 /*
102 * Must be visible before the overload count is
103 * set (as in sched_rt.c).
104 *
105 * Matched by the barrier in pull_dl_task().
106 */
107 smp_wmb();
108 atomic_inc(&rq->rd->dlo_count);
109 }
110
111 static inline void dl_clear_overload(struct rq *rq)
112 {
113 if (!rq->online)
114 return;
115
116 atomic_dec(&rq->rd->dlo_count);
117 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118 }
119
120 static void update_dl_migration(struct dl_rq *dl_rq)
121 {
122 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 if (!dl_rq->overloaded) {
124 dl_set_overload(rq_of_dl_rq(dl_rq));
125 dl_rq->overloaded = 1;
126 }
127 } else if (dl_rq->overloaded) {
128 dl_clear_overload(rq_of_dl_rq(dl_rq));
129 dl_rq->overloaded = 0;
130 }
131 }
132
133 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134 {
135 struct task_struct *p = dl_task_of(dl_se);
136
137 if (p->nr_cpus_allowed > 1)
138 dl_rq->dl_nr_migratory++;
139
140 update_dl_migration(dl_rq);
141 }
142
143 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144 {
145 struct task_struct *p = dl_task_of(dl_se);
146
147 if (p->nr_cpus_allowed > 1)
148 dl_rq->dl_nr_migratory--;
149
150 update_dl_migration(dl_rq);
151 }
152
153 /*
154 * The list of pushable -deadline tasks is not a plist, like in
155 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
156 */
157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158 {
159 struct dl_rq *dl_rq = &rq->dl;
160 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 struct rb_node *parent = NULL;
162 struct task_struct *entry;
163 int leftmost = 1;
164
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166
167 while (*link) {
168 parent = *link;
169 entry = rb_entry(parent, struct task_struct,
170 pushable_dl_tasks);
171 if (dl_entity_preempt(&p->dl, &entry->dl))
172 link = &parent->rb_left;
173 else {
174 link = &parent->rb_right;
175 leftmost = 0;
176 }
177 }
178
179 if (leftmost)
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181
182 rb_link_node(&p->pushable_dl_tasks, parent, link);
183 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
184 }
185
186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
187 {
188 struct dl_rq *dl_rq = &rq->dl;
189
190 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
191 return;
192
193 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
194 struct rb_node *next_node;
195
196 next_node = rb_next(&p->pushable_dl_tasks);
197 dl_rq->pushable_dl_tasks_leftmost = next_node;
198 }
199
200 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
201 RB_CLEAR_NODE(&p->pushable_dl_tasks);
202 }
203
204 static inline int has_pushable_dl_tasks(struct rq *rq)
205 {
206 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
207 }
208
209 static int push_dl_task(struct rq *rq);
210
211 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
212 {
213 return dl_task(prev);
214 }
215
216 static inline void set_post_schedule(struct rq *rq)
217 {
218 rq->post_schedule = has_pushable_dl_tasks(rq);
219 }
220
221 #else
222
223 static inline
224 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
225 {
226 }
227
228 static inline
229 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
230 {
231 }
232
233 static inline
234 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
235 {
236 }
237
238 static inline
239 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
240 {
241 }
242
243 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
244 {
245 return false;
246 }
247
248 static inline int pull_dl_task(struct rq *rq)
249 {
250 return 0;
251 }
252
253 static inline void set_post_schedule(struct rq *rq)
254 {
255 }
256 #endif /* CONFIG_SMP */
257
258 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
259 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
260 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
261 int flags);
262
263 /*
264 * We are being explicitly informed that a new instance is starting,
265 * and this means that:
266 * - the absolute deadline of the entity has to be placed at
267 * current time + relative deadline;
268 * - the runtime of the entity has to be set to the maximum value.
269 *
270 * The capability of specifying such an event is useful whenever a -deadline
271 * entity wants to (try to!) synchronize its behaviour with the scheduler's,
272 * and to (try to!) reconcile itself with its own scheduling
273 * parameters.
274 */
275 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
276 struct sched_dl_entity *pi_se)
277 {
278 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
279 struct rq *rq = rq_of_dl_rq(dl_rq);
280
281 WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
282
283 /*
284 * We use the regular wall clock time to set deadlines in the
285 * future; in fact, we must consider execution overheads (time
286 * spent on hardirq context, etc.).
287 */
288 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
289 dl_se->runtime = pi_se->dl_runtime;
290 dl_se->dl_new = 0;
291 }
292
293 /*
294 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
295 * possibility of an entity lasting longer than what it declared, and thus
296 * exhausting its runtime.
297 *
298 * Here we are interested in making runtime overrun possible, but we do
299 * not want an entity which is misbehaving to affect the scheduling of all
300 * other entities.
301 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
302 * is used, in order to confine each entity within its own bandwidth.
303 *
304 * This function deals exactly with that, and ensures that when the runtime
305 * of an entity is replenished, its deadline is also postponed. That ensures
306 * the overrunning entity can't interfere with other entities in the system and
307 * can't make them miss their deadlines. Reasons why this kind of overrun
308 * could happen are, typically, an entity voluntarily trying to exceed its
309 * runtime, or having underestimated it during sched_setattr().
310 */
311 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
312 struct sched_dl_entity *pi_se)
313 {
314 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
315 struct rq *rq = rq_of_dl_rq(dl_rq);
316
317 BUG_ON(pi_se->dl_runtime <= 0);
318
319 /*
320 * This could be the case for a !-dl task that is boosted.
321 * Just go with full inherited parameters.
322 */
323 if (dl_se->dl_deadline == 0) {
324 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
325 dl_se->runtime = pi_se->dl_runtime;
326 }
327
328 /*
329 * We keep moving the deadline away until we get some
330 * available runtime for the entity. This ensures correct
331 * handling of situations where the runtime overrun is
332 * arbitrarily large.
333 */
334 while (dl_se->runtime <= 0) {
335 dl_se->deadline += pi_se->dl_period;
336 dl_se->runtime += pi_se->dl_runtime;
337 }
338
339 /*
340 * At this point, the deadline really should be "in
341 * the future" with respect to rq->clock. If it's
342 * not, we are, for some reason, lagging too much!
343 * Anyway, after having warned userspace about that,
344 * we still try to keep things running by
345 * resetting the deadline and the budget of the
346 * entity.
347 */
348 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
349 printk_deferred_once("sched: DL replenish lagged too much\n");
350 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
351 dl_se->runtime = pi_se->dl_runtime;
352 }
353
354 if (dl_se->dl_yielded)
355 dl_se->dl_yielded = 0;
356 if (dl_se->dl_throttled)
357 dl_se->dl_throttled = 0;
358 }
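/*
 * A worked example of the replenishment loop above, with made-up
 * numbers: take dl_runtime = 5ms, dl_period = 20ms and an entity that
 * overran so badly that runtime = -7ms. The loop runs twice
 * (-7ms -> -2ms -> +3ms), leaving 3ms of budget and a deadline
 * postponed by 2 * 20ms = 40ms, which is exactly what keeps the
 * entity within its 5/20 = 25% bandwidth.
 */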
359
360 /*
361 * Here we check if --at time t-- an entity (which is probably being
362 * [re]activated or, in general, enqueued) can use its remaining runtime
363 * and its current deadline _without_ exceeding the bandwidth it is
364 * assigned (function returns true if it can't). We are in fact applying
365 * one of the CBS rules: when a task wakes up, if the residual runtime
366 * over residual deadline fits within the allocated bandwidth, then we
367 * can keep the current (absolute) deadline and residual budget without
368 * disrupting the schedulability of the system. Otherwise, we should
369 * refill the runtime and set the deadline a period in the future,
370 * because keeping the current (absolute) deadline of the task would
371 * result in breaking guarantees promised to other tasks (refer to
372 * Documentation/scheduler/sched-deadline.txt for more information).
373 *
374 * This function returns true if:
375 *
376 * runtime / (deadline - t) > dl_runtime / dl_period ,
377 *
378 * IOW we can't recycle current parameters.
379 *
380 * Notice that the bandwidth check is done against the period. For
381 * tasks with deadline equal to period this is the same as using
382 * dl_deadline instead of dl_period in the equation above.
383 */
384 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
385 struct sched_dl_entity *pi_se, u64 t)
386 {
387 u64 left, right;
388
389 /*
390 * left and right are the two sides of the equation above,
391 * after a bit of shuffling to use multiplications instead
392 * of divisions.
393 *
394 * Note that none of the time values involved in the two
395 * multiplications are absolute: dl_deadline and dl_runtime
396 * are the relative deadline and the maximum runtime of each
397 * instance, runtime is the runtime left for the last instance
398 * and (deadline - t), since t is rq->clock, is the time left
399 * to the (absolute) deadline. Even if overflowing the u64 type
400 * is very unlikely to occur in both cases, here we scale down
401 * as we want to avoid that risk at all. Scaling down by 10
402 * means that we reduce granularity to 1us. We are fine with it,
403 * since this is only a true/false check and, anyway, thinking
404 * of anything below microseconds resolution is actually fiction
405 * (but still we want to give the user that illusion >;).
406 */
407 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
408 right = ((dl_se->deadline - t) >> DL_SCALE) *
409 (pi_se->dl_runtime >> DL_SCALE);
410
411 return dl_time_before(right, left);
412 }
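/*
 * Example with made-up numbers: dl_runtime = 10ms, dl_period = 100ms,
 * and at time t the entity still has runtime = 4ms with its deadline
 * 30ms away. The (unscaled) check compares 100ms * 4ms against
 * 30ms * 10ms, i.e. 400 vs 300: left > right, so 4/30 > 10/100 and
 * the residual budget cannot be kept; update_dl_entity() will then
 * reset both the deadline and the runtime.
 */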
413
414 /*
415 * When a -deadline entity is queued back on the runqueue, its runtime and
416 * deadline might need updating.
417 *
418 * The policy here is that we update the deadline of the entity only if:
419 * - the current deadline is in the past,
420 * - using the remaining runtime with the current deadline would make
421 * the entity exceed its bandwidth.
422 */
423 static void update_dl_entity(struct sched_dl_entity *dl_se,
424 struct sched_dl_entity *pi_se)
425 {
426 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
427 struct rq *rq = rq_of_dl_rq(dl_rq);
428
429 /*
430 * The arrival of a new instance needs special treatment, i.e.,
431 * the actual scheduling parameters have to be "renewed".
432 */
433 if (dl_se->dl_new) {
434 setup_new_dl_entity(dl_se, pi_se);
435 return;
436 }
437
438 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
439 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
440 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
441 dl_se->runtime = pi_se->dl_runtime;
442 }
443 }
444
445 /*
446 * If the entity depleted all its runtime, and if we want it to sleep
447 * while waiting for some new execution time to become available, we
448 * set the bandwidth enforcement timer to the replenishment instant
449 * and try to activate it.
450 *
451 * Notice that it is important for the caller to know if the timer
452 * actually started or not (i.e., the replenishment instant is in
453 * the future or in the past).
454 */
455 static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
456 {
457 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
458 struct rq *rq = rq_of_dl_rq(dl_rq);
459 ktime_t now, act;
460 ktime_t soft, hard;
461 unsigned long range;
462 s64 delta;
463
464 if (boosted)
465 return 0;
466 /*
467 * We want the timer to fire at the deadline, but consider that
468 * the deadline actually comes from rq->clock and not from the
469 * hrtimer's time base reading.
470 */
471 act = ns_to_ktime(dl_se->deadline);
472 now = hrtimer_cb_get_time(&dl_se->dl_timer);
473 delta = ktime_to_ns(now) - rq_clock(rq);
474 act = ktime_add_ns(act, delta);
475
476 /*
477 * If the expiry time already passed, e.g., because the value
478 * chosen as the deadline is too small, don't even try to
479 * start the timer in the past!
480 */
481 if (ktime_us_delta(act, now) < 0)
482 return 0;
483
484 hrtimer_set_expires(&dl_se->dl_timer, act);
485
486 soft = hrtimer_get_softexpires(&dl_se->dl_timer);
487 hard = hrtimer_get_expires(&dl_se->dl_timer);
488 range = ktime_to_ns(ktime_sub(hard, soft));
489 __hrtimer_start_range_ns(&dl_se->dl_timer, soft,
490 range, HRTIMER_MODE_ABS, 0);
491
492 return hrtimer_active(&dl_se->dl_timer);
493 }
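/*
 * Illustration of the clock-base adjustment above (made-up numbers):
 * if rq_clock(rq) reads 100ms, the hrtimer base reads 100.2ms and the
 * deadline is 150ms (in rq->clock terms), then delta = +0.2ms and the
 * timer is armed at act = 150.2ms in the hrtimer's own base, so it
 * still fires at the deadline as seen by the scheduler clock.
 */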
494
495 /*
496 * This is the bandwidth enforcement timer callback. If here, we know
497 * a task is not on its dl_rq, since the fact that the timer was running
498 * means the task is throttled and needs a runtime replenishment.
499 *
500 * However, what we actually do depends on whether the task is still
501 * active (it is on its rq) or has been removed from there by a call to
502 * dequeue_task_dl(). In the former case we must issue the runtime
503 * replenishment and add the task back to the dl_rq; in the latter, we just
504 * do nothing but clearing dl_throttled, so that runtime and deadline
505 * updating (and the queueing back to dl_rq) will be done by the
506 * next call to enqueue_task_dl().
507 */
508 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
509 {
510 struct sched_dl_entity *dl_se = container_of(timer,
511 struct sched_dl_entity,
512 dl_timer);
513 struct task_struct *p = dl_task_of(dl_se);
514 unsigned long flags;
515 struct rq *rq;
516
517 rq = task_rq_lock(current, &flags);
518
519 /*
520 * We need to take care of several possible races here:
521 *
522 * - the task might have changed its scheduling policy
523 * to something different than SCHED_DEADLINE
524 * - the task might have changed its reservation parameters
525 * (through sched_setattr())
526 * - the task might have been boosted by someone else and
527 * might be in the boosting/deboosting path
528 *
529 * In all these cases we bail out, as the task is already
530 * in the runqueue or is going to be enqueued back anyway.
531 */
532 if (!dl_task(p) || dl_se->dl_new ||
533 dl_se->dl_boosted || !dl_se->dl_throttled)
534 goto unlock;
535
536 sched_clock_tick();
537 update_rq_clock(rq);
538
539 /*
540 * If the throttle happened during sched-out, like:
541 *
542 * schedule()
543 * deactivate_task()
544 * dequeue_task_dl()
545 * update_curr_dl()
546 * start_dl_timer()
547 * __dequeue_task_dl()
548 * prev->on_rq = 0;
549 *
550 * We can be both throttled and !queued. Replenish the counter
551 * but do not enqueue -- wait for our wakeup to do that.
552 */
553 if (!task_on_rq_queued(p)) {
554 replenish_dl_entity(dl_se, dl_se);
555 goto unlock;
556 }
557
558 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
559 if (dl_task(rq->curr))
560 check_preempt_curr_dl(rq, p, 0);
561 else
562 resched_curr(rq);
563 #ifdef CONFIG_SMP
564 /*
565 * Queueing this task back might have overloaded rq,
566 * check if we need to kick someone away.
567 */
568 if (has_pushable_dl_tasks(rq))
569 push_dl_task(rq);
570 #endif
571 unlock:
572 task_rq_unlock(rq, current, &flags);
573
574 return HRTIMER_NORESTART;
575 }
576
577 void init_dl_task_timer(struct sched_dl_entity *dl_se)
578 {
579 struct hrtimer *timer = &dl_se->dl_timer;
580
581 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
582 timer->function = dl_task_timer;
583 }
584
585 static
586 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
587 {
588 return (dl_se->runtime <= 0);
589 }
590
591 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
592
593 /*
594 * Update the current task's runtime statistics (provided it is still
595 * a -deadline task and has not been removed from the dl_rq).
596 */
597 static void update_curr_dl(struct rq *rq)
598 {
599 struct task_struct *curr = rq->curr;
600 struct sched_dl_entity *dl_se = &curr->dl;
601 u64 delta_exec;
602
603 if (!dl_task(curr) || !on_dl_rq(dl_se))
604 return;
605
606 /*
607 * Consumed budget is computed considering the time as
608 * observed by schedulable tasks (excluding time spent
609 * in hardirq context, etc.). Deadlines are instead
610 * computed using hard walltime. This seems to be the more
611 * natural solution, but the full ramifications of this
612 * approach need further study.
613 */
614 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
615 if (unlikely((s64)delta_exec <= 0))
616 return;
617
618 schedstat_set(curr->se.statistics.exec_max,
619 max(curr->se.statistics.exec_max, delta_exec));
620
621 curr->se.sum_exec_runtime += delta_exec;
622 account_group_exec_runtime(curr, delta_exec);
623
624 curr->se.exec_start = rq_clock_task(rq);
625 cpuacct_charge(curr, delta_exec);
626
627 sched_rt_avg_update(rq, delta_exec);
628
629 dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
630 if (dl_runtime_exceeded(rq, dl_se)) {
631 dl_se->dl_throttled = 1;
632 __dequeue_task_dl(rq, curr, 0);
633 if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
634 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
635
636 if (!is_leftmost(curr, &rq->dl))
637 resched_curr(rq);
638 }
639
640 /*
641 * Because -- for now -- we share the rt bandwidth, we need to
642 * account our runtime there too, otherwise actual rt tasks
643 * would be able to exceed the shared quota.
644 *
645 * Account to the root rt group for now.
646 *
647 * The solution we're working towards is having the RT groups scheduled
648 * using deadline servers -- however there's a few nasties to figure
649 * out before that can happen.
650 */
651 if (rt_bandwidth_enabled()) {
652 struct rt_rq *rt_rq = &rq->rt;
653
654 raw_spin_lock(&rt_rq->rt_runtime_lock);
655 /*
656 * We'll let actual RT tasks worry about the overflow here, we
657 * have our own CBS to keep us in line; only account when RT
658 * bandwidth is relevant.
659 */
660 if (sched_rt_bandwidth_account(rt_rq))
661 rt_rq->rt_time += delta_exec;
662 raw_spin_unlock(&rt_rq->rt_runtime_lock);
663 }
664 }
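/*
 * Tying the pieces above together with made-up numbers: a task with
 * dl_runtime = 10ms that has already executed 12ms in the current
 * instance reaches runtime = -2ms here, gets dl_throttled set, is
 * dequeued, and its dl_timer is armed at the current deadline; the
 * timer callback then replenishes it via replenish_dl_entity().
 */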
665
666 #ifdef CONFIG_SMP
667
668 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
669
670 static inline u64 next_deadline(struct rq *rq)
671 {
672 struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
673
674 if (next && dl_prio(next->prio))
675 return next->dl.deadline;
676 else
677 return 0;
678 }
679
680 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
681 {
682 struct rq *rq = rq_of_dl_rq(dl_rq);
683
684 if (dl_rq->earliest_dl.curr == 0 ||
685 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
686 /*
687 * If the dl_rq had no -deadline tasks, or if the new task
688 * has a shorter deadline than the current one on dl_rq, we
689 * know that the previous earliest becomes our next earliest,
690 * as the new task becomes the earliest itself.
691 */
692 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
693 dl_rq->earliest_dl.curr = deadline;
694 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
695 } else if (dl_rq->earliest_dl.next == 0 ||
696 dl_time_before(deadline, dl_rq->earliest_dl.next)) {
697 /*
698 * On the other hand, if the new -deadline task has a
699 * later deadline than the earliest one on dl_rq, but
700 * it is earlier than the next (if any), we must
701 * recompute the next-earliest.
702 */
703 dl_rq->earliest_dl.next = next_deadline(rq);
704 }
705 }
706
707 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
708 {
709 struct rq *rq = rq_of_dl_rq(dl_rq);
710
711 /*
712 * Since we may have removed our earliest (and/or next earliest)
713 * task we must recompute them.
714 */
715 if (!dl_rq->dl_nr_running) {
716 dl_rq->earliest_dl.curr = 0;
717 dl_rq->earliest_dl.next = 0;
718 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
719 } else {
720 struct rb_node *leftmost = dl_rq->rb_leftmost;
721 struct sched_dl_entity *entry;
722
723 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
724 dl_rq->earliest_dl.curr = entry->deadline;
725 dl_rq->earliest_dl.next = next_deadline(rq);
726 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
727 }
728 }
729
730 #else
731
732 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
733 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
734
735 #endif /* CONFIG_SMP */
736
737 static inline
738 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
739 {
740 int prio = dl_task_of(dl_se)->prio;
741 u64 deadline = dl_se->deadline;
742
743 WARN_ON(!dl_prio(prio));
744 dl_rq->dl_nr_running++;
745 add_nr_running(rq_of_dl_rq(dl_rq), 1);
746
747 inc_dl_deadline(dl_rq, deadline);
748 inc_dl_migration(dl_se, dl_rq);
749 }
750
751 static inline
752 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
753 {
754 int prio = dl_task_of(dl_se)->prio;
755
756 WARN_ON(!dl_prio(prio));
757 WARN_ON(!dl_rq->dl_nr_running);
758 dl_rq->dl_nr_running--;
759 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
760
761 dec_dl_deadline(dl_rq, dl_se->deadline);
762 dec_dl_migration(dl_se, dl_rq);
763 }
764
765 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
766 {
767 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
768 struct rb_node **link = &dl_rq->rb_root.rb_node;
769 struct rb_node *parent = NULL;
770 struct sched_dl_entity *entry;
771 int leftmost = 1;
772
773 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
774
775 while (*link) {
776 parent = *link;
777 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
778 if (dl_time_before(dl_se->deadline, entry->deadline))
779 link = &parent->rb_left;
780 else {
781 link = &parent->rb_right;
782 leftmost = 0;
783 }
784 }
785
786 if (leftmost)
787 dl_rq->rb_leftmost = &dl_se->rb_node;
788
789 rb_link_node(&dl_se->rb_node, parent, link);
790 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
791
792 inc_dl_tasks(dl_se, dl_rq);
793 }
794
795 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
796 {
797 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
798
799 if (RB_EMPTY_NODE(&dl_se->rb_node))
800 return;
801
802 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
803 struct rb_node *next_node;
804
805 next_node = rb_next(&dl_se->rb_node);
806 dl_rq->rb_leftmost = next_node;
807 }
808
809 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
810 RB_CLEAR_NODE(&dl_se->rb_node);
811
812 dec_dl_tasks(dl_se, dl_rq);
813 }
814
815 static void
816 enqueue_dl_entity(struct sched_dl_entity *dl_se,
817 struct sched_dl_entity *pi_se, int flags)
818 {
819 BUG_ON(on_dl_rq(dl_se));
820
821 /*
822 * If this is a wakeup or a new instance, the scheduling
823 * parameters of the task might need updating. Otherwise,
824 * we want a replenishment of its runtime.
825 */
826 if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
827 update_dl_entity(dl_se, pi_se);
828 else if (flags & ENQUEUE_REPLENISH)
829 replenish_dl_entity(dl_se, pi_se);
830
831 __enqueue_dl_entity(dl_se);
832 }
833
834 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
835 {
836 __dequeue_dl_entity(dl_se);
837 }
838
839 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
840 {
841 struct task_struct *pi_task = rt_mutex_get_top_task(p);
842 struct sched_dl_entity *pi_se = &p->dl;
843
844 /*
845 * Use the scheduling parameters of the top pi-waiter
846 * task if we have one and its (relative) deadline is
847 * smaller than ours... Otherwise we keep our runtime and
848 * deadline.
849 */
850 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
851 pi_se = &pi_task->dl;
852 } else if (!dl_prio(p->normal_prio)) {
853 /*
854 * Special case in which we have a !SCHED_DEADLINE task
855 * that is going to be deboosted, but exceeds its
856 * runtime while doing so. No point in replenishing
857 * it, as it's going to go back to its original
858 * scheduling class after this.
859 */
860 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
861 return;
862 }
863
864 /*
865 * If p is throttled, we do nothing. In fact, if it exhausted
866 * its budget it needs a replenishment and, since it now is on
867 * its rq, the bandwidth timer callback (which clearly has not
868 * run yet) will take care of this.
869 */
870 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
871 return;
872
873 enqueue_dl_entity(&p->dl, pi_se, flags);
874
875 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
876 enqueue_pushable_dl_task(rq, p);
877 }
878
879 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
880 {
881 dequeue_dl_entity(&p->dl);
882 dequeue_pushable_dl_task(rq, p);
883 }
884
885 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
886 {
887 update_curr_dl(rq);
888 __dequeue_task_dl(rq, p, flags);
889 }
890
891 /*
892 * Yield task semantic for -deadline tasks is:
893 *
894 * get off the CPU until our next instance, with
895 * a new runtime. This is of little use now, since we
896 * don't have a bandwidth reclaiming mechanism. Anyway,
897 * bandwidth reclaiming is planned for the future, and
898 * yield_task_dl will indicate that some spare budget
899 * is available for other task instances to use.
900 */
901 static void yield_task_dl(struct rq *rq)
902 {
903 struct task_struct *p = rq->curr;
904
905 /*
906 * We make the task go to sleep until its current deadline by
907 * forcing its runtime to zero. This way, update_curr_dl() stops
908 * it and the bandwidth timer will wake it up and will give it
909 * new scheduling parameters (thanks to dl_yielded=1).
910 */
911 if (p->dl.runtime > 0) {
912 rq->curr->dl.dl_yielded = 1;
913 p->dl.runtime = 0;
914 }
915 update_rq_clock(rq);
916 update_curr_dl(rq);
917 }
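/*
 * Net effect, sketched with made-up numbers: a task that yields with
 * 3ms of runtime left and its deadline 8ms away sleeps for those 8ms
 * (dl_yielded keeps update_curr_dl() from charging it further), then
 * the dl_timer replenishes it with a full runtime and a deadline one
 * period later.
 */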
918
919 #ifdef CONFIG_SMP
920
921 static int find_later_rq(struct task_struct *task);
922
923 static int
924 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
925 {
926 struct task_struct *curr;
927 struct rq *rq;
928
929 if (sd_flag != SD_BALANCE_WAKE)
930 goto out;
931
932 rq = cpu_rq(cpu);
933
934 rcu_read_lock();
935 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
936
937 /*
938 * If we are dealing with a -deadline task, we must
939 * decide where to wake it up.
940 * If it has a later deadline and the current task
941 * on this rq can't move (provided the waking task
942 * can!) we prefer to send it somewhere else. On the
943 * other hand, if it has a shorter deadline, we
944 * try to make it stay here, as it might be important.
945 */
946 if (unlikely(dl_task(curr)) &&
947 (curr->nr_cpus_allowed < 2 ||
948 !dl_entity_preempt(&p->dl, &curr->dl)) &&
949 (p->nr_cpus_allowed > 1)) {
950 int target = find_later_rq(p);
951
952 if (target != -1)
953 cpu = target;
954 }
955 rcu_read_unlock();
956
957 out:
958 return cpu;
959 }
960
961 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
962 {
963 /*
964 * Current can't be migrated, useless to reschedule,
965 * let's hope p can move out.
966 */
967 if (rq->curr->nr_cpus_allowed == 1 ||
968 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
969 return;
970
971 /*
972 * p is migratable, so let's not schedule it and
973 * see if it is pushed or pulled somewhere else.
974 */
975 if (p->nr_cpus_allowed != 1 &&
976 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
977 return;
978
979 resched_curr(rq);
980 }
981
982 static int pull_dl_task(struct rq *this_rq);
983
984 #endif /* CONFIG_SMP */
985
986 /*
987 * Only called when both the current and waking task are -deadline
988 * tasks.
989 */
990 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
991 int flags)
992 {
993 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
994 resched_curr(rq);
995 return;
996 }
997
998 #ifdef CONFIG_SMP
999 /*
1000 * In the unlikely case current and p have the same deadline
1001 * let us try to decide what's the best thing to do...
1002 */
1003 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1004 !test_tsk_need_resched(rq->curr))
1005 check_preempt_equal_dl(rq, p);
1006 #endif /* CONFIG_SMP */
1007 }
1008
1009 #ifdef CONFIG_SCHED_HRTICK
1010 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1011 {
1012 hrtick_start(rq, p->dl.runtime);
1013 }
1014 #else /* !CONFIG_SCHED_HRTICK */
1015 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1016 {
1017 }
1018 #endif
1019
1020 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1021 struct dl_rq *dl_rq)
1022 {
1023 struct rb_node *left = dl_rq->rb_leftmost;
1024
1025 if (!left)
1026 return NULL;
1027
1028 return rb_entry(left, struct sched_dl_entity, rb_node);
1029 }
1030
1031 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1032 {
1033 struct sched_dl_entity *dl_se;
1034 struct task_struct *p;
1035 struct dl_rq *dl_rq;
1036
1037 dl_rq = &rq->dl;
1038
1039 if (need_pull_dl_task(rq, prev)) {
1040 pull_dl_task(rq);
1041 /*
1042 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1043 * means a stop task can slip in, in which case we need to
1044 * re-start task selection.
1045 */
1046 if (rq->stop && task_on_rq_queued(rq->stop))
1047 return RETRY_TASK;
1048 }
1049
1050 /*
1051 * When prev is DL, we may throttle it in put_prev_task().
1052 * So, we update time before we check for dl_nr_running.
1053 */
1054 if (prev->sched_class == &dl_sched_class)
1055 update_curr_dl(rq);
1056
1057 if (unlikely(!dl_rq->dl_nr_running))
1058 return NULL;
1059
1060 put_prev_task(rq, prev);
1061
1062 dl_se = pick_next_dl_entity(rq, dl_rq);
1063 BUG_ON(!dl_se);
1064
1065 p = dl_task_of(dl_se);
1066 p->se.exec_start = rq_clock_task(rq);
1067
1068 /* Running task will never be pushed. */
1069 dequeue_pushable_dl_task(rq, p);
1070
1071 if (hrtick_enabled(rq))
1072 start_hrtick_dl(rq, p);
1073
1074 set_post_schedule(rq);
1075
1076 return p;
1077 }
1078
1079 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1080 {
1081 update_curr_dl(rq);
1082
1083 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1084 enqueue_pushable_dl_task(rq, p);
1085 }
1086
1087 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1088 {
1089 update_curr_dl(rq);
1090
1091 /*
1092 * Even when we have runtime, update_curr_dl() might have resulted in us
1093 * not being the leftmost task anymore. In that case NEED_RESCHED will
1094 * be set and schedule() will start a new hrtick for the next task.
1095 */
1096 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1097 is_leftmost(p, &rq->dl))
1098 start_hrtick_dl(rq, p);
1099 }
1100
1101 static void task_fork_dl(struct task_struct *p)
1102 {
1103 /*
1104 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1105 * sched_fork()
1106 */
1107 }
1108
1109 static void task_dead_dl(struct task_struct *p)
1110 {
1111 struct hrtimer *timer = &p->dl.dl_timer;
1112 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1113
1114 /*
1115 * Since we are TASK_DEAD we won't slip out of the domain!
1116 */
1117 raw_spin_lock_irq(&dl_b->lock);
1118 /* XXX we should retain the bw until 0-lag */
1119 dl_b->total_bw -= p->dl.dl_bw;
1120 raw_spin_unlock_irq(&dl_b->lock);
1121
1122 hrtimer_cancel(timer);
1123 }
1124
1125 static void set_curr_task_dl(struct rq *rq)
1126 {
1127 struct task_struct *p = rq->curr;
1128
1129 p->se.exec_start = rq_clock_task(rq);
1130
1131 /* You can't push away the running task */
1132 dequeue_pushable_dl_task(rq, p);
1133 }
1134
1135 #ifdef CONFIG_SMP
1136
1137 /* Only try algorithms three times */
1138 #define DL_MAX_TRIES 3
1139
1140 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1141 {
1142 if (!task_running(rq, p) &&
1143 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1144 return 1;
1145 return 0;
1146 }
1147
1148 /* Returns the second earliest -deadline task, NULL otherwise */
1149 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1150 {
1151 struct rb_node *next_node = rq->dl.rb_leftmost;
1152 struct sched_dl_entity *dl_se;
1153 struct task_struct *p = NULL;
1154
1155 next_node:
1156 next_node = rb_next(next_node);
1157 if (next_node) {
1158 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1159 p = dl_task_of(dl_se);
1160
1161 if (pick_dl_task(rq, p, cpu))
1162 return p;
1163
1164 goto next_node;
1165 }
1166
1167 return NULL;
1168 }
1169
1170 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1171
1172 static int find_later_rq(struct task_struct *task)
1173 {
1174 struct sched_domain *sd;
1175 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1176 int this_cpu = smp_processor_id();
1177 int best_cpu, cpu = task_cpu(task);
1178
1179 /* Make sure the mask is initialized first */
1180 if (unlikely(!later_mask))
1181 return -1;
1182
1183 if (task->nr_cpus_allowed == 1)
1184 return -1;
1185
1186 /*
1187 * We have to consider system topology and task affinity
1188 * first, then we can look for a suitable cpu.
1189 */
1190 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1191 task, later_mask);
1192 if (best_cpu == -1)
1193 return -1;
1194
1195 /*
1196 * If we are here, some target has been found,
1197 * the most suitable of which is cached in best_cpu.
1198 * This is, among the runqueues where the current tasks
1199 * have later deadlines than the task's one, the rq
1200 * with the latest possible one.
1201 *
1202 * Now we check how well this matches with task's
1203 * affinity and system topology.
1204 *
1205 * The last cpu where the task ran is our first
1206 * guess, since it is most likely cache-hot there.
1207 */
1208 if (cpumask_test_cpu(cpu, later_mask))
1209 return cpu;
1210 /*
1211 * Check if this_cpu is to be skipped (i.e., it is
1212 * not in the mask) or not.
1213 */
1214 if (!cpumask_test_cpu(this_cpu, later_mask))
1215 this_cpu = -1;
1216
1217 rcu_read_lock();
1218 for_each_domain(cpu, sd) {
1219 if (sd->flags & SD_WAKE_AFFINE) {
1220
1221 /*
1222 * If possible, preempting this_cpu is
1223 * cheaper than migrating.
1224 */
1225 if (this_cpu != -1 &&
1226 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1227 rcu_read_unlock();
1228 return this_cpu;
1229 }
1230
1231 /*
1232 * Last chance: if best_cpu is valid and is
1233 * in the mask, that becomes our choice.
1234 */
1235 if (best_cpu < nr_cpu_ids &&
1236 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1237 rcu_read_unlock();
1238 return best_cpu;
1239 }
1240 }
1241 }
1242 rcu_read_unlock();
1243
1244 /*
1245 * At this point, all our guesses failed, we just return
1246 * 'something', and let the caller sort things out.
1247 */
1248 if (this_cpu != -1)
1249 return this_cpu;
1250
1251 cpu = cpumask_any(later_mask);
1252 if (cpu < nr_cpu_ids)
1253 return cpu;
1254
1255 return -1;
1256 }
1257
1258 /* Locks the rq it finds */
1259 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1260 {
1261 struct rq *later_rq = NULL;
1262 int tries;
1263 int cpu;
1264
1265 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1266 cpu = find_later_rq(task);
1267
1268 if ((cpu == -1) || (cpu == rq->cpu))
1269 break;
1270
1271 later_rq = cpu_rq(cpu);
1272
1273 /* Retry if something changed. */
1274 if (double_lock_balance(rq, later_rq)) {
1275 if (unlikely(task_rq(task) != rq ||
1276 !cpumask_test_cpu(later_rq->cpu,
1277 &task->cpus_allowed) ||
1278 task_running(rq, task) ||
1279 !task_on_rq_queued(task))) {
1280 double_unlock_balance(rq, later_rq);
1281 later_rq = NULL;
1282 break;
1283 }
1284 }
1285
1286 /*
1287 * If the rq we found has no -deadline task, or
1288 * its earliest one has a later deadline than our
1289 * task, the rq is a good one.
1290 */
1291 if (!later_rq->dl.dl_nr_running ||
1292 dl_time_before(task->dl.deadline,
1293 later_rq->dl.earliest_dl.curr))
1294 break;
1295
1296 /* Otherwise we try again. */
1297 double_unlock_balance(rq, later_rq);
1298 later_rq = NULL;
1299 }
1300
1301 return later_rq;
1302 }
1303
1304 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1305 {
1306 struct task_struct *p;
1307
1308 if (!has_pushable_dl_tasks(rq))
1309 return NULL;
1310
1311 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1312 struct task_struct, pushable_dl_tasks);
1313
1314 BUG_ON(rq->cpu != task_cpu(p));
1315 BUG_ON(task_current(rq, p));
1316 BUG_ON(p->nr_cpus_allowed <= 1);
1317
1318 BUG_ON(!task_on_rq_queued(p));
1319 BUG_ON(!dl_task(p));
1320
1321 return p;
1322 }
1323
1324 /*
1325 * See if the non-running -deadline tasks on this rq
1326 * can be sent to some other CPU where they can preempt
1327 * and start executing.
1328 */
1329 static int push_dl_task(struct rq *rq)
1330 {
1331 struct task_struct *next_task;
1332 struct rq *later_rq;
1333 int ret = 0;
1334
1335 if (!rq->dl.overloaded)
1336 return 0;
1337
1338 next_task = pick_next_pushable_dl_task(rq);
1339 if (!next_task)
1340 return 0;
1341
1342 retry:
1343 if (unlikely(next_task == rq->curr)) {
1344 WARN_ON(1);
1345 return 0;
1346 }
1347
1348 /*
1349 * If next_task preempts rq->curr, and rq->curr
1350 * can move away, it makes sense to just reschedule
1351 * without going further in pushing next_task.
1352 */
1353 if (dl_task(rq->curr) &&
1354 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1355 rq->curr->nr_cpus_allowed > 1) {
1356 resched_curr(rq);
1357 return 0;
1358 }
1359
1360 /* We might release rq lock */
1361 get_task_struct(next_task);
1362
1363 /* Will lock the rq it'll find */
1364 later_rq = find_lock_later_rq(next_task, rq);
1365 if (!later_rq) {
1366 struct task_struct *task;
1367
1368 /*
1369 * We must check all this again, since
1370 * find_lock_later_rq releases rq->lock and it is
1371 * then possible that next_task has migrated.
1372 */
1373 task = pick_next_pushable_dl_task(rq);
1374 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1375 /*
1376 * The task is still there. We don't try
1377 * again, some other cpu will pull it when ready.
1378 */
1379 goto out;
1380 }
1381
1382 if (!task)
1383 /* No more tasks */
1384 goto out;
1385
1386 put_task_struct(next_task);
1387 next_task = task;
1388 goto retry;
1389 }
1390
1391 deactivate_task(rq, next_task, 0);
1392 set_task_cpu(next_task, later_rq->cpu);
1393 activate_task(later_rq, next_task, 0);
1394 ret = 1;
1395
1396 resched_curr(later_rq);
1397
1398 double_unlock_balance(rq, later_rq);
1399
1400 out:
1401 put_task_struct(next_task);
1402
1403 return ret;
1404 }
1405
1406 static void push_dl_tasks(struct rq *rq)
1407 {
1408 /* Terminates as it moves a -deadline task */
1409 while (push_dl_task(rq))
1410 ;
1411 }
1412
1413 static int pull_dl_task(struct rq *this_rq)
1414 {
1415 int this_cpu = this_rq->cpu, ret = 0, cpu;
1416 struct task_struct *p;
1417 struct rq *src_rq;
1418 u64 dmin = LONG_MAX;
1419
1420 if (likely(!dl_overloaded(this_rq)))
1421 return 0;
1422
1423 /*
1424 * Match the barrier from dl_set_overload(); this guarantees that if we
1425 * see overloaded we must also see the dlo_mask bit.
1426 */
1427 smp_rmb();
1428
1429 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1430 if (this_cpu == cpu)
1431 continue;
1432
1433 src_rq = cpu_rq(cpu);
1434
1435 /*
1436 * It looks racy, and it is! However, as in sched_rt.c,
1437 * we are fine with this.
1438 */
1439 if (this_rq->dl.dl_nr_running &&
1440 dl_time_before(this_rq->dl.earliest_dl.curr,
1441 src_rq->dl.earliest_dl.next))
1442 continue;
1443
1444 /* Might drop this_rq->lock */
1445 double_lock_balance(this_rq, src_rq);
1446
1447 /*
1448 * If there are no more pullable tasks on the
1449 * rq, we're done with it.
1450 */
1451 if (src_rq->dl.dl_nr_running <= 1)
1452 goto skip;
1453
1454 p = pick_next_earliest_dl_task(src_rq, this_cpu);
1455
1456 /*
1457 * We found a task to be pulled if:
1458 * - it preempts our current (if there's one),
1459 * - it will preempt the last one we pulled (if any).
1460 */
1461 if (p && dl_time_before(p->dl.deadline, dmin) &&
1462 (!this_rq->dl.dl_nr_running ||
1463 dl_time_before(p->dl.deadline,
1464 this_rq->dl.earliest_dl.curr))) {
1465 WARN_ON(p == src_rq->curr);
1466 WARN_ON(!task_on_rq_queued(p));
1467
1468 /*
1469 * Only pull p if it does not have an earlier deadline than
1470 * the current task of its runqueue: otherwise it is about to run there.
1471 */
1472 if (dl_time_before(p->dl.deadline,
1473 src_rq->curr->dl.deadline))
1474 goto skip;
1475
1476 ret = 1;
1477
1478 deactivate_task(src_rq, p, 0);
1479 set_task_cpu(p, this_cpu);
1480 activate_task(this_rq, p, 0);
1481 dmin = p->dl.deadline;
1482
1483 /* Is there any other task even earlier? */
1484 }
1485 skip:
1486 double_unlock_balance(this_rq, src_rq);
1487 }
1488
1489 return ret;
1490 }
1491
1492 static void post_schedule_dl(struct rq *rq)
1493 {
1494 push_dl_tasks(rq);
1495 }
1496
1497 /*
1498 * Since the task is not running and a reschedule is not going to happen
1499 * anytime soon on its runqueue, we try pushing it away now.
1500 */
1501 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1502 {
1503 if (!task_running(rq, p) &&
1504 !test_tsk_need_resched(rq->curr) &&
1505 has_pushable_dl_tasks(rq) &&
1506 p->nr_cpus_allowed > 1 &&
1507 dl_task(rq->curr) &&
1508 (rq->curr->nr_cpus_allowed < 2 ||
1509 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1510 push_dl_tasks(rq);
1511 }
1512 }
1513
1514 static void set_cpus_allowed_dl(struct task_struct *p,
1515 const struct cpumask *new_mask)
1516 {
1517 struct rq *rq;
1518 struct root_domain *src_rd;
1519 int weight;
1520
1521 BUG_ON(!dl_task(p));
1522
1523 rq = task_rq(p);
1524 src_rd = rq->rd;
1525 /*
1526 * Migrating a SCHED_DEADLINE task between exclusive
1527 * cpusets (different root_domains) entails a bandwidth
1528 * update. We already made space for us in the destination
1529 * domain (see cpuset_can_attach()).
1530 */
1531 if (!cpumask_intersects(src_rd->span, new_mask)) {
1532 struct dl_bw *src_dl_b;
1533
1534 src_dl_b = dl_bw_of(cpu_of(rq));
1535 /*
1536 * We now free resources of the root_domain we are migrating
1537 * off. In the worst case, sched_setattr() may temporarily fail
1538 * until we complete the update.
1539 */
1540 raw_spin_lock(&src_dl_b->lock);
1541 __dl_clear(src_dl_b, p->dl.dl_bw);
1542 raw_spin_unlock(&src_dl_b->lock);
1543 }
1544
1545 /*
1546 * Update only if the task is actually running (i.e.,
1547 * it is on the rq AND it is not throttled).
1548 */
1549 if (!on_dl_rq(&p->dl))
1550 return;
1551
1552 weight = cpumask_weight(new_mask);
1553
1554 /*
1555 * Only update if the process changes whether it can
1556 * migrate or not.
1557 */
1558 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1559 return;
1560
1561 /*
1562 * The process used to be able to migrate OR it can now migrate
1563 */
1564 if (weight <= 1) {
1565 if (!task_current(rq, p))
1566 dequeue_pushable_dl_task(rq, p);
1567 BUG_ON(!rq->dl.dl_nr_migratory);
1568 rq->dl.dl_nr_migratory--;
1569 } else {
1570 if (!task_current(rq, p))
1571 enqueue_pushable_dl_task(rq, p);
1572 rq->dl.dl_nr_migratory++;
1573 }
1574
1575 update_dl_migration(&rq->dl);
1576 }
1577
1578 /* Assumes rq->lock is held */
1579 static void rq_online_dl(struct rq *rq)
1580 {
1581 if (rq->dl.overloaded)
1582 dl_set_overload(rq);
1583
1584 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1585 if (rq->dl.dl_nr_running > 0)
1586 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1587 }
1588
1589 /* Assumes rq->lock is held */
1590 static void rq_offline_dl(struct rq *rq)
1591 {
1592 if (rq->dl.overloaded)
1593 dl_clear_overload(rq);
1594
1595 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1596 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1597 }
1598
1599 void init_sched_dl_class(void)
1600 {
1601 unsigned int i;
1602
1603 for_each_possible_cpu(i)
1604 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1605 GFP_KERNEL, cpu_to_node(i));
1606 }
1607
1608 #endif /* CONFIG_SMP */
1609
1610 /*
1611 * Ensure p's dl_timer is cancelled. May drop rq->lock for a while.
1612 */
1613 static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
1614 {
1615 struct hrtimer *dl_timer = &p->dl.dl_timer;
1616
1617 /* Nobody will change task's class if pi_lock is held */
1618 lockdep_assert_held(&p->pi_lock);
1619
1620 if (hrtimer_active(dl_timer)) {
1621 int ret = hrtimer_try_to_cancel(dl_timer);
1622
1623 if (unlikely(ret == -1)) {
1624 /*
1625 * Note, p may migrate OR new deadline tasks
1626 * may appear in rq when we are unlocking it.
1627 * A caller of us must be fine with that.
1628 */
1629 raw_spin_unlock(&rq->lock);
1630 hrtimer_cancel(dl_timer);
1631 raw_spin_lock(&rq->lock);
1632 }
1633 }
1634 }
1635
1636 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1637 {
1638 /* XXX we should retain the bw until 0-lag */
1639 cancel_dl_timer(rq, p);
1640 __dl_clear_params(p);
1641
1642 /*
1643 * Since this might be the only -deadline task on the rq,
1644 * this is the right place to try to pull some other one
1645 * from an overloaded cpu, if any.
1646 */
1647 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1648 return;
1649
1650 if (pull_dl_task(rq))
1651 resched_curr(rq);
1652 }
1653
1654 /*
1655 * When switching to -deadline, we may overload the rq, then
1656 * we try to push someone off, if possible.
1657 */
1658 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1659 {
1660 int check_resched = 1;
1661
1662 /*
1663 * If p is throttled, don't consider the possibility
1664 * of preempting rq->curr; the check will be done right
1665 * after its runtime gets replenished.
1666 */
1667 if (unlikely(p->dl.dl_throttled))
1668 return;
1669
1670 if (task_on_rq_queued(p) && rq->curr != p) {
1671 #ifdef CONFIG_SMP
1672 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&
1673 push_dl_task(rq) && rq != task_rq(p))
1674 /* Only reschedule if pushing failed */
1675 check_resched = 0;
1676 #endif /* CONFIG_SMP */
1677 if (check_resched) {
1678 if (dl_task(rq->curr))
1679 check_preempt_curr_dl(rq, p, 0);
1680 else
1681 resched_curr(rq);
1682 }
1683 }
1684 }
1685
1686 /*
1687 * If the scheduling parameters of a -deadline task changed,
1688 * a push or pull operation might be needed.
1689 */
1690 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1691 int oldprio)
1692 {
1693 if (task_on_rq_queued(p) || rq->curr == p) {
1694 #ifdef CONFIG_SMP
1695 /*
1696 * This might be too much, but unfortunately
1697 * we don't have the old deadline value, and
1698 * we can't tell whether the task is increasing
1699 * or lowering its prio, so...
1700 */
1701 if (!rq->dl.overloaded)
1702 pull_dl_task(rq);
1703
1704 /*
1705 * If we now have an earlier deadline task than p,
1706 * then reschedule, provided p is still on this
1707 * runqueue.
1708 */
1709 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
1710 rq->curr == p)
1711 resched_curr(rq);
1712 #else
1713 /*
1714 * Again, we don't know if p has an earlier
1715 * or later deadline, so let's blindly set a
1716 * (maybe not needed) rescheduling point.
1717 */
1718 resched_curr(rq);
1719 #endif /* CONFIG_SMP */
1720 } else
1721 switched_to_dl(rq, p);
1722 }
1723
1724 const struct sched_class dl_sched_class = {
1725 .next = &rt_sched_class,
1726 .enqueue_task = enqueue_task_dl,
1727 .dequeue_task = dequeue_task_dl,
1728 .yield_task = yield_task_dl,
1729
1730 .check_preempt_curr = check_preempt_curr_dl,
1731
1732 .pick_next_task = pick_next_task_dl,
1733 .put_prev_task = put_prev_task_dl,
1734
1735 #ifdef CONFIG_SMP
1736 .select_task_rq = select_task_rq_dl,
1737 .set_cpus_allowed = set_cpus_allowed_dl,
1738 .rq_online = rq_online_dl,
1739 .rq_offline = rq_offline_dl,
1740 .post_schedule = post_schedule_dl,
1741 .task_woken = task_woken_dl,
1742 #endif
1743
1744 .set_curr_task = set_curr_task_dl,
1745 .task_tick = task_tick_dl,
1746 .task_fork = task_fork_dl,
1747 .task_dead = task_dead_dl,
1748
1749 .prio_changed = prio_changed_dl,
1750 .switched_from = switched_from_dl,
1751 .switched_to = switched_to_dl,
1752
1753 .update_curr = update_curr_dl,
1754 };
1755
1756 #ifdef CONFIG_SCHED_DEBUG
1757 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1758
1759 void print_dl_stats(struct seq_file *m, int cpu)
1760 {
1761 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1762 }
1763 #endif /* CONFIG_SCHED_DEBUG */