/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>
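/*
 * For orientation, a hedged userspace sketch (not part of this file) of how
 * a task declares the (runtime, deadline, period) triple that this class
 * enforces. It assumes the sched_setattr() syscall and struct sched_attr
 * from the uapi headers; the numbers are made up:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	(10ms worst-case budget)
 *		.sched_deadline	= 30 * 1000 * 1000,	(30ms relative deadline)
 *		.sched_period	= 30 * 1000 * 1000,	(30ms period)
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 *
 * A task that runs for at most 10ms in every 30ms window will then never
 * miss a deadline; one that tries to run longer is throttled by the CBS.
 */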
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}
extern unsigned long to_ratio(u64 period, u64 runtime);
void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}
void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}
static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}
static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}
static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}
static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}
/*
 * Unlike in sched_rt.c, the list of pushable -deadline tasks is not a
 * plist but an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}
static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}
static int push_dl_task(struct rq *rq);
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}
static inline void set_post_schedule(struct rq *rq)
{
	rq->post_schedule = has_pushable_dl_tasks(rq);
}
#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_dl_task(struct rq *rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}

#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setscheduler_ex().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily big.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		static bool lag_once = false;

		if (!lag_once) {
			lag_once = true;
			printk_sched("sched: DL replenish lagged too much\n");
		}
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
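/*
 * A worked replenishment example (illustrative numbers only): with
 * dl_runtime = 10ms and dl_period = 30ms, an entity found with
 * runtime = -15ms takes two passes through the loop above
 * (-15ms -> -5ms -> +5ms) and has its deadline pushed two full
 * periods (60ms) into the future: the overrunning entity pays for its
 * overrun with later deadlines instead of stealing bandwidth from the
 * other entities.
 */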
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microsecond resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
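/*
 * A worked example of the check above (illustrative numbers only):
 * a task with dl_runtime = 10ms and dl_period = 30ms (bandwidth 1/3)
 * wakes up with runtime = 3ms left and 5ms to its old deadline.
 * Since 3/5 > 10/30, keeping the residual parameters would exceed the
 * reserved bandwidth, so the function returns true and the caller
 * refills the runtime and postpones the deadline. With the same 3ms
 * left but 15ms to the deadline, 3/15 < 10/30 and the old parameters
 * are kept.
 */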
/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}
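/*
 * Example of the clock-domain adjustment above (made-up numbers): if
 * rq_clock(rq) reads 100ms while the hrtimer base reads 102ms, a
 * deadline of 130ms in rq->clock terms must be armed at
 * 130ms + (102ms - 100ms) = 132ms in hrtimer terms, which is exactly
 * act = deadline + (now - rq_clock).
 */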
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	raw_spin_lock(&rq->lock);

	/*
	 * We need to take care of possible races here. In fact, the
	 * task might have changed its scheduling policy to something
	 * different from SCHED_DEADLINE or changed its reservation
	 * parameters (through sched_setscheduler()).
	 */
	if (!dl_task(p) || dl_se->dl_new)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);
	dl_se->dl_throttled = 0;
	if (p->on_rq) {
		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
		if (task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_task(rq->curr);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
		 * check if we need to kick someone away.
		 */
		if (has_pushable_dl_tasks(rq))
			push_dl_task(rq);
#endif
	}
unlock:
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	if (hrtimer_active(timer)) {
		hrtimer_try_to_cancel(timer);
		return;
	}

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}
static int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
	int rorun = dl_se->runtime <= 0;

	if (!rorun && !dmiss)
		return 0;

	/*
	 * If we are beyond our current deadline and we are still
	 * executing, then we have already used some of the runtime of
	 * the next instance. Thus, if we do not account that, we are
	 * stealing bandwidth from the system at each deadline miss!
	 */
	if (dmiss) {
		dl_se->runtime = rorun ? dl_se->runtime : 0;
		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
	}

	return 1;
}
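/*
 * Worked example (illustrative numbers only): a task found 2ms past
 * its deadline with 1ms of runtime left has dmiss set but not rorun,
 * so its runtime becomes 0 - 2ms = -2ms: the 2ms executed beyond the
 * deadline are charged to the next instance, and the subsequent
 * replenishment will have to pay them back.
 */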
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;
	if (dl_runtime_exceeded(rq, dl_se)) {
		__dequeue_task_dl(rq, curr, 0);
		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
			dl_se->dl_throttled = 1;
		else
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_task(curr);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there are a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}
#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

	if (next && dl_prio(next->prio))
		return next->dl.deadline;
	else
		return 0;
}
static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has a shorter deadline than the current one on dl_rq, we
		 * know that the previous earliest becomes our next earliest,
		 * as the new task becomes the earliest itself.
		 */
		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	} else if (dl_rq->earliest_dl.next == 0 ||
		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
		/*
		 * On the other hand, if the new -deadline task has a
		 * later deadline than the earliest one on dl_rq, but
		 * it is earlier than the next (if any), we must
		 * recompute the next-earliest.
		 */
		dl_rq->earliest_dl.next = next_deadline(rq);
	}
}
static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		dl_rq->earliest_dl.next = next_deadline(rq);
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}
#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	inc_nr_running(rq_of_dl_rq(dl_rq));

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}
static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	dec_nr_running(rq_of_dl_rq(dl_rq));

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}
static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);
	else
		update_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}
static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (relative) deadline is
	 * smaller than our one... OTW we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
		pi_se = &pi_task->dl;

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled)
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}
static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}
/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_new=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_new = 1;
		p->dl.runtime = 0;
	}
	update_curr_dl(rq);
}
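/*
 * From userspace this maps to sched_yield(): a hedged sketch of a
 * periodic -deadline job that gives back its unused budget until the
 * next period once its work is done:
 *
 *	for (;;) {
 *		do_work();
 *		sched_yield();
 *	}
 */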
#ifdef CONFIG_SMP
static int find_later_rq(struct task_struct *task);
static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_task(rq->curr);
}
static int pull_dl_task(struct rq *this_rq);

#endif /* CONFIG_SMP */
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	s64 delta = p->dl.dl_runtime - p->dl.runtime;

	if (delta > 10000)
		hrtick_start(rq, p->dl.runtime);
}
#endif
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}
struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		pull_dl_task(rq);
		/*
		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && rq->stop->on_rq)
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);
#endif

	set_post_schedule(rq);

	return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
		start_hrtick_dl(rq, p);
#endif
}
static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}
static void task_dead_dl(struct task_struct *p)
{
	struct hrtimer *timer = &p->dl.dl_timer;
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);

	hrtimer_cancel(timer);
}
static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;

	return 0;
}
/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.rb_leftmost;
	struct sched_dl_entity *dl_se;
	struct task_struct *p = NULL;

next_node:
	next_node = rb_next(next_node);
	if (next_node) {
		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
		p = dl_task_of(dl_se);

		if (pick_dl_task(rq, p, cpu))
			return p;

		goto next_node;
	}

	return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with the task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;

	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->on_rq)) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!dl_task(p));

	return p;
}
/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			dequeue_pushable_dl_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);

	resched_task(later_rq->curr);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return 1;
}
static void push_dl_tasks(struct rq *rq)
{
	/* Terminates as it moves a -deadline task */
	while (push_dl_task(rq))
		;
}
static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_next_earliest_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_dl_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
		push_dl_tasks(rq);
	}
}
static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!dl_task(p));

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the process changes its state from whether it
	 * can migrate or not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}
/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}
void init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
		hrtimer_try_to_cancel(&p->dl.dl_timer);

#ifdef CONFIG_SMP
	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!rq->dl.dl_nr_running)
		pull_dl_task(rq);
#endif
}
/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If p is throttled, don't consider the possibility
	 * of preempting rq->curr, the check will be done right
	 * after its runtime will get replenished.
	 */
	if (unlikely(p->dl.dl_throttled))
		return;

	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
	}
}
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (p->on_rq || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_task(p);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_task(p);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}
const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,
};