sched: Final power vs. capacity cleanups
kernel/sched/rt.c
1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9
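/*
 * Default SCHED_RR quantum, in jiffies. RR_TIMESLICE is nominally 100ms
 * worth of ticks in mainline (an assumption here; the exact value comes
 * from the sched/rt headers), and it can typically be tuned at runtime
 * through the sched_rr_timeslice_ms sysctl.
 */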
10 int sched_rr_timeslice = RR_TIMESLICE;
11
12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
14 struct rt_bandwidth def_rt_bandwidth;
15
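/*
 * Per-bandwidth period timer. Each expiry forwards the hrtimer by whole
 * rt_period intervals and lets do_sched_rt_period_timer() replenish
 * runtime (and unthrottle rt_rqs) once per elapsed period. The timer is
 * only stopped (HRTIMER_NORESTART) when every covered rt_rq reported
 * itself idle.
 */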
16 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17 {
18 struct rt_bandwidth *rt_b =
19 container_of(timer, struct rt_bandwidth, rt_period_timer);
20 ktime_t now;
21 int overrun;
22 int idle = 0;
23
24 for (;;) {
25 now = hrtimer_cb_get_time(timer);
26 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28 if (!overrun)
29 break;
30
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 }
33
34 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35 }
36
37 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38 {
39 rt_b->rt_period = ns_to_ktime(period);
40 rt_b->rt_runtime = runtime;
41
42 raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44 hrtimer_init(&rt_b->rt_period_timer,
45 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46 rt_b->rt_period_timer.function = sched_rt_period_timer;
47 }
48
49 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50 {
51 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52 return;
53
54 if (hrtimer_active(&rt_b->rt_period_timer))
55 return;
56
57 raw_spin_lock(&rt_b->rt_runtime_lock);
58 start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59 raw_spin_unlock(&rt_b->rt_runtime_lock);
60 }
61
62 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63 {
64 struct rt_prio_array *array;
65 int i;
66
67 array = &rt_rq->active;
68 for (i = 0; i < MAX_RT_PRIO; i++) {
69 INIT_LIST_HEAD(array->queue + i);
70 __clear_bit(i, array->bitmap);
71 }
72 /* delimiter for bitsearch: */
73 __set_bit(MAX_RT_PRIO, array->bitmap);
74
75 #if defined CONFIG_SMP
76 rt_rq->highest_prio.curr = MAX_RT_PRIO;
77 rt_rq->highest_prio.next = MAX_RT_PRIO;
78 rt_rq->rt_nr_migratory = 0;
79 rt_rq->overloaded = 0;
80 plist_head_init(&rt_rq->pushable_tasks);
81 #endif
82 /* We start in dequeued state, because no RT tasks are queued */
83 rt_rq->rt_queued = 0;
84
85 rt_rq->rt_time = 0;
86 rt_rq->rt_throttled = 0;
87 rt_rq->rt_runtime = 0;
88 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
89 }
90
91 #ifdef CONFIG_RT_GROUP_SCHED
92 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
93 {
94 hrtimer_cancel(&rt_b->rt_period_timer);
95 }
96
97 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
98
99 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
100 {
101 #ifdef CONFIG_SCHED_DEBUG
102 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
103 #endif
104 return container_of(rt_se, struct task_struct, rt);
105 }
106
107 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
108 {
109 return rt_rq->rq;
110 }
111
112 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
113 {
114 return rt_se->rt_rq;
115 }
116
117 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
118 {
119 struct rt_rq *rt_rq = rt_se->rt_rq;
120
121 return rt_rq->rq;
122 }
123
124 void free_rt_sched_group(struct task_group *tg)
125 {
126 int i;
127
128 if (tg->rt_se)
129 destroy_rt_bandwidth(&tg->rt_bandwidth);
130
131 for_each_possible_cpu(i) {
132 if (tg->rt_rq)
133 kfree(tg->rt_rq[i]);
134 if (tg->rt_se)
135 kfree(tg->rt_se[i]);
136 }
137
138 kfree(tg->rt_rq);
139 kfree(tg->rt_se);
140 }
141
142 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
143 struct sched_rt_entity *rt_se, int cpu,
144 struct sched_rt_entity *parent)
145 {
146 struct rq *rq = cpu_rq(cpu);
147
148 rt_rq->highest_prio.curr = MAX_RT_PRIO;
149 rt_rq->rt_nr_boosted = 0;
150 rt_rq->rq = rq;
151 rt_rq->tg = tg;
152
153 tg->rt_rq[cpu] = rt_rq;
154 tg->rt_se[cpu] = rt_se;
155
156 if (!rt_se)
157 return;
158
159 if (!parent)
160 rt_se->rt_rq = &rq->rt;
161 else
162 rt_se->rt_rq = parent->my_q;
163
164 rt_se->my_q = rt_rq;
165 rt_se->parent = parent;
166 INIT_LIST_HEAD(&rt_se->run_list);
167 }
168
169 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
170 {
171 struct rt_rq *rt_rq;
172 struct sched_rt_entity *rt_se;
173 int i;
174
175 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
176 if (!tg->rt_rq)
177 goto err;
178 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
179 if (!tg->rt_se)
180 goto err;
181
182 init_rt_bandwidth(&tg->rt_bandwidth,
183 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
184
185 for_each_possible_cpu(i) {
186 rt_rq = kzalloc_node(sizeof(struct rt_rq),
187 GFP_KERNEL, cpu_to_node(i));
188 if (!rt_rq)
189 goto err;
190
191 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
192 GFP_KERNEL, cpu_to_node(i));
193 if (!rt_se)
194 goto err_free_rq;
195
196 init_rt_rq(rt_rq, cpu_rq(i));
197 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
198 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
199 }
200
201 return 1;
202
203 err_free_rq:
204 kfree(rt_rq);
205 err:
206 return 0;
207 }
208
209 #else /* CONFIG_RT_GROUP_SCHED */
210
211 #define rt_entity_is_task(rt_se) (1)
212
213 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
214 {
215 return container_of(rt_se, struct task_struct, rt);
216 }
217
218 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
219 {
220 return container_of(rt_rq, struct rq, rt);
221 }
222
223 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
224 {
225 struct task_struct *p = rt_task_of(rt_se);
226
227 return task_rq(p);
228 }
229
230 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
231 {
232 struct rq *rq = rq_of_rt_se(rt_se);
233
234 return &rq->rt;
235 }
236
237 void free_rt_sched_group(struct task_group *tg) { }
238
239 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
240 {
241 return 1;
242 }
243 #endif /* CONFIG_RT_GROUP_SCHED */
244
245 #ifdef CONFIG_SMP
246
247 static int pull_rt_task(struct rq *this_rq);
248
249 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
250 {
251 /* Try to pull RT tasks here if we lower this rq's prio */
252 return rq->rt.highest_prio.curr > prev->prio;
253 }
254
255 static inline int rt_overloaded(struct rq *rq)
256 {
257 return atomic_read(&rq->rd->rto_count);
258 }
259
260 static inline void rt_set_overload(struct rq *rq)
261 {
262 if (!rq->online)
263 return;
264
265 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
266 /*
267 * Make sure the mask is visible before we set
268 * the overload count. That is checked to determine
269 * if we should look at the mask. It would be a shame
270 * if we looked at the mask, but the mask was not
271 * updated yet.
272 *
273 * Matched by the barrier in pull_rt_task().
274 */
275 smp_wmb();
276 atomic_inc(&rq->rd->rto_count);
277 }
278
279 static inline void rt_clear_overload(struct rq *rq)
280 {
281 if (!rq->online)
282 return;
283
284 /* the order here really doesn't matter */
285 atomic_dec(&rq->rd->rto_count);
286 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
287 }
288
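/*
 * RT overload tracking: a runqueue is marked overloaded in the root
 * domain's rto_mask when it has more than one runnable RT task and at
 * least one of them may migrate. Other CPUs consult that mask in
 * pull_rt_task() to find queues worth pulling from.
 */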
289 static void update_rt_migration(struct rt_rq *rt_rq)
290 {
291 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
292 if (!rt_rq->overloaded) {
293 rt_set_overload(rq_of_rt_rq(rt_rq));
294 rt_rq->overloaded = 1;
295 }
296 } else if (rt_rq->overloaded) {
297 rt_clear_overload(rq_of_rt_rq(rt_rq));
298 rt_rq->overloaded = 0;
299 }
300 }
301
302 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
303 {
304 struct task_struct *p;
305
306 if (!rt_entity_is_task(rt_se))
307 return;
308
309 p = rt_task_of(rt_se);
310 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
311
312 rt_rq->rt_nr_total++;
313 if (p->nr_cpus_allowed > 1)
314 rt_rq->rt_nr_migratory++;
315
316 update_rt_migration(rt_rq);
317 }
318
319 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
320 {
321 struct task_struct *p;
322
323 if (!rt_entity_is_task(rt_se))
324 return;
325
326 p = rt_task_of(rt_se);
327 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
328
329 rt_rq->rt_nr_total--;
330 if (p->nr_cpus_allowed > 1)
331 rt_rq->rt_nr_migratory--;
332
333 update_rt_migration(rt_rq);
334 }
335
336 static inline int has_pushable_tasks(struct rq *rq)
337 {
338 return !plist_head_empty(&rq->rt.pushable_tasks);
339 }
340
341 static inline void set_post_schedule(struct rq *rq)
342 {
343 /*
344 * We detect this state here so that we can avoid taking the RQ
345 * lock again later if there is no need to push
346 */
347 rq->post_schedule = has_pushable_tasks(rq);
348 }
349
350 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
351 {
352 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
353 plist_node_init(&p->pushable_tasks, p->prio);
354 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
355
356 /* Update the highest prio pushable task */
357 if (p->prio < rq->rt.highest_prio.next)
358 rq->rt.highest_prio.next = p->prio;
359 }
360
361 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
362 {
363 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
364
365 /* Update the new highest prio pushable task */
366 if (has_pushable_tasks(rq)) {
367 p = plist_first_entry(&rq->rt.pushable_tasks,
368 struct task_struct, pushable_tasks);
369 rq->rt.highest_prio.next = p->prio;
370 } else
371 rq->rt.highest_prio.next = MAX_RT_PRIO;
372 }
373
374 #else
375
376 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
377 {
378 }
379
380 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
381 {
382 }
383
384 static inline
385 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
386 {
387 }
388
389 static inline
390 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
391 {
392 }
393
394 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
395 {
396 return false;
397 }
398
399 static inline int pull_rt_task(struct rq *this_rq)
400 {
401 return 0;
402 }
403
404 static inline void set_post_schedule(struct rq *rq)
405 {
406 }
407 #endif /* CONFIG_SMP */
408
409 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
410 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
411
412 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
413 {
414 return !list_empty(&rt_se->run_list);
415 }
416
417 #ifdef CONFIG_RT_GROUP_SCHED
418
419 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
420 {
421 if (!rt_rq->tg)
422 return RUNTIME_INF;
423
424 return rt_rq->rt_runtime;
425 }
426
427 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
428 {
429 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
430 }
431
432 typedef struct task_group *rt_rq_iter_t;
433
434 static inline struct task_group *next_task_group(struct task_group *tg)
435 {
436 do {
437 tg = list_entry_rcu(tg->list.next,
438 typeof(struct task_group), list);
439 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
440
441 if (&tg->list == &task_groups)
442 tg = NULL;
443
444 return tg;
445 }
446
447 #define for_each_rt_rq(rt_rq, iter, rq) \
448 for (iter = container_of(&task_groups, typeof(*iter), list); \
449 (iter = next_task_group(iter)) && \
450 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
451
452 #define for_each_sched_rt_entity(rt_se) \
453 for (; rt_se; rt_se = rt_se->parent)
454
455 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
456 {
457 return rt_se->my_q;
458 }
459
460 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
461 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
462
463 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
464 {
465 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
466 struct sched_rt_entity *rt_se;
467
468 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
469
470 rt_se = rt_rq->tg->rt_se[cpu];
471
472 if (rt_rq->rt_nr_running) {
473 if (!rt_se)
474 enqueue_top_rt_rq(rt_rq);
475 else if (!on_rt_rq(rt_se))
476 enqueue_rt_entity(rt_se, false);
477
478 if (rt_rq->highest_prio.curr < curr->prio)
479 resched_task(curr);
480 }
481 }
482
483 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
484 {
485 struct sched_rt_entity *rt_se;
486 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
487
488 rt_se = rt_rq->tg->rt_se[cpu];
489
490 if (!rt_se)
491 dequeue_top_rt_rq(rt_rq);
492 else if (on_rt_rq(rt_se))
493 dequeue_rt_entity(rt_se);
494 }
495
496 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
497 {
498 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
499 }
500
501 static int rt_se_boosted(struct sched_rt_entity *rt_se)
502 {
503 struct rt_rq *rt_rq = group_rt_rq(rt_se);
504 struct task_struct *p;
505
506 if (rt_rq)
507 return !!rt_rq->rt_nr_boosted;
508
509 p = rt_task_of(rt_se);
510 return p->prio != p->normal_prio;
511 }
512
513 #ifdef CONFIG_SMP
514 static inline const struct cpumask *sched_rt_period_mask(void)
515 {
516 return this_rq()->rd->span;
517 }
518 #else
519 static inline const struct cpumask *sched_rt_period_mask(void)
520 {
521 return cpu_online_mask;
522 }
523 #endif
524
525 static inline
526 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
527 {
528 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
529 }
530
531 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
532 {
533 return &rt_rq->tg->rt_bandwidth;
534 }
535
536 #else /* !CONFIG_RT_GROUP_SCHED */
537
538 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
539 {
540 return rt_rq->rt_runtime;
541 }
542
543 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
544 {
545 return ktime_to_ns(def_rt_bandwidth.rt_period);
546 }
547
548 typedef struct rt_rq *rt_rq_iter_t;
549
550 #define for_each_rt_rq(rt_rq, iter, rq) \
551 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
552
553 #define for_each_sched_rt_entity(rt_se) \
554 for (; rt_se; rt_se = NULL)
555
556 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
557 {
558 return NULL;
559 }
560
561 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
562 {
563 struct rq *rq = rq_of_rt_rq(rt_rq);
564
565 if (!rt_rq->rt_nr_running)
566 return;
567
568 enqueue_top_rt_rq(rt_rq);
569 resched_task(rq->curr);
570 }
571
572 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
573 {
574 dequeue_top_rt_rq(rt_rq);
575 }
576
577 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
578 {
579 return rt_rq->rt_throttled;
580 }
581
582 static inline const struct cpumask *sched_rt_period_mask(void)
583 {
584 return cpu_online_mask;
585 }
586
587 static inline
588 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
589 {
590 return &cpu_rq(cpu)->rt;
591 }
592
593 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
594 {
595 return &def_rt_bandwidth;
596 }
597
598 #endif /* CONFIG_RT_GROUP_SCHED */
599
600 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
601 {
602 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
603
604 return (hrtimer_active(&rt_b->rt_period_timer) ||
605 rt_rq->rt_time < rt_b->rt_runtime);
606 }
607
608 #ifdef CONFIG_SMP
609 /*
610 * We ran out of runtime, see if we can borrow some from our neighbours.
611 */
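/*
 * Illustrative example (using the usual defaults, which is an assumption
 * here): with a 1s period and 950ms runtime per rt_rq, a CPU that has
 * exhausted its 950ms may take up to 1/nr_cpus of each neighbour's unused
 * runtime, but its total can never exceed the full period.
 */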
612 static int do_balance_runtime(struct rt_rq *rt_rq)
613 {
614 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
615 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
616 int i, weight, more = 0;
617 u64 rt_period;
618
619 weight = cpumask_weight(rd->span);
620
621 raw_spin_lock(&rt_b->rt_runtime_lock);
622 rt_period = ktime_to_ns(rt_b->rt_period);
623 for_each_cpu(i, rd->span) {
624 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
625 s64 diff;
626
627 if (iter == rt_rq)
628 continue;
629
630 raw_spin_lock(&iter->rt_runtime_lock);
631 /*
632 * Either all rqs have inf runtime and there's nothing to steal
633 * or __disable_runtime() below sets a specific rq to inf to
634 * indicate it has been disabled and disallow stealing.
635 */
636 if (iter->rt_runtime == RUNTIME_INF)
637 goto next;
638
639 /*
640 * From runqueues with spare time, take 1/n part of their
641 * spare time, but no more than our period.
642 */
643 diff = iter->rt_runtime - iter->rt_time;
644 if (diff > 0) {
645 diff = div_u64((u64)diff, weight);
646 if (rt_rq->rt_runtime + diff > rt_period)
647 diff = rt_period - rt_rq->rt_runtime;
648 iter->rt_runtime -= diff;
649 rt_rq->rt_runtime += diff;
650 more = 1;
651 if (rt_rq->rt_runtime == rt_period) {
652 raw_spin_unlock(&iter->rt_runtime_lock);
653 break;
654 }
655 }
656 next:
657 raw_spin_unlock(&iter->rt_runtime_lock);
658 }
659 raw_spin_unlock(&rt_b->rt_runtime_lock);
660
661 return more;
662 }
663
664 /*
665 * Ensure this RQ takes back all the runtime it lent to its neighbours.
666 */
667 static void __disable_runtime(struct rq *rq)
668 {
669 struct root_domain *rd = rq->rd;
670 rt_rq_iter_t iter;
671 struct rt_rq *rt_rq;
672
673 if (unlikely(!scheduler_running))
674 return;
675
676 for_each_rt_rq(rt_rq, iter, rq) {
677 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
678 s64 want;
679 int i;
680
681 raw_spin_lock(&rt_b->rt_runtime_lock);
682 raw_spin_lock(&rt_rq->rt_runtime_lock);
683 /*
684 * Either we're all inf and nobody needs to borrow, or we're
685 * already disabled and thus have nothing to do, or we have
686 * exactly the right amount of runtime to take out.
687 */
688 if (rt_rq->rt_runtime == RUNTIME_INF ||
689 rt_rq->rt_runtime == rt_b->rt_runtime)
690 goto balanced;
691 raw_spin_unlock(&rt_rq->rt_runtime_lock);
692
693 /*
694 * Calculate the difference between what we started out with
695 * and what we currently have; that's the amount of runtime
696 * we lent out and now have to reclaim.
697 */
698 want = rt_b->rt_runtime - rt_rq->rt_runtime;
699
700 /*
701 * Greedy reclaim, take back as much as we can.
702 */
703 for_each_cpu(i, rd->span) {
704 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
705 s64 diff;
706
707 /*
708 * Can't reclaim from ourselves or disabled runqueues.
709 */
710 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
711 continue;
712
713 raw_spin_lock(&iter->rt_runtime_lock);
714 if (want > 0) {
715 diff = min_t(s64, iter->rt_runtime, want);
716 iter->rt_runtime -= diff;
717 want -= diff;
718 } else {
719 iter->rt_runtime -= want;
720 want -= want;
721 }
722 raw_spin_unlock(&iter->rt_runtime_lock);
723
724 if (!want)
725 break;
726 }
727
728 raw_spin_lock(&rt_rq->rt_runtime_lock);
729 /*
730 * We cannot be left wanting - that would mean some runtime
731 * leaked out of the system.
732 */
733 BUG_ON(want);
734 balanced:
735 /*
736 * Disable all the borrow logic by pretending we have inf
737 * runtime - in which case borrowing doesn't make sense.
738 */
739 rt_rq->rt_runtime = RUNTIME_INF;
740 rt_rq->rt_throttled = 0;
741 raw_spin_unlock(&rt_rq->rt_runtime_lock);
742 raw_spin_unlock(&rt_b->rt_runtime_lock);
743 }
744 }
745
746 static void __enable_runtime(struct rq *rq)
747 {
748 rt_rq_iter_t iter;
749 struct rt_rq *rt_rq;
750
751 if (unlikely(!scheduler_running))
752 return;
753
754 /*
755 * Reset each runqueue's bandwidth settings
756 */
757 for_each_rt_rq(rt_rq, iter, rq) {
758 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
759
760 raw_spin_lock(&rt_b->rt_runtime_lock);
761 raw_spin_lock(&rt_rq->rt_runtime_lock);
762 rt_rq->rt_runtime = rt_b->rt_runtime;
763 rt_rq->rt_time = 0;
764 rt_rq->rt_throttled = 0;
765 raw_spin_unlock(&rt_rq->rt_runtime_lock);
766 raw_spin_unlock(&rt_b->rt_runtime_lock);
767 }
768 }
769
770 static int balance_runtime(struct rt_rq *rt_rq)
771 {
772 int more = 0;
773
774 if (!sched_feat(RT_RUNTIME_SHARE))
775 return more;
776
777 if (rt_rq->rt_time > rt_rq->rt_runtime) {
778 raw_spin_unlock(&rt_rq->rt_runtime_lock);
779 more = do_balance_runtime(rt_rq);
780 raw_spin_lock(&rt_rq->rt_runtime_lock);
781 }
782
783 return more;
784 }
785 #else /* !CONFIG_SMP */
786 static inline int balance_runtime(struct rt_rq *rt_rq)
787 {
788 return 0;
789 }
790 #endif /* CONFIG_SMP */
791
792 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
793 {
794 int i, idle = 1, throttled = 0;
795 const struct cpumask *span;
796
797 span = sched_rt_period_mask();
798 #ifdef CONFIG_RT_GROUP_SCHED
799 /*
800 * FIXME: isolated CPUs should really leave the root task group,
801 * whether they are isolcpus or were isolated via cpusets, lest
802 * the timer run on a CPU which does not service all runqueues,
803 * potentially leaving other CPUs indefinitely throttled. If
804 * isolation is really required, the user will turn the throttle
805 * off to kill the perturbations it causes anyway. Meanwhile,
806 * this maintains functionality for boot and/or troubleshooting.
807 */
808 if (rt_b == &root_task_group.rt_bandwidth)
809 span = cpu_online_mask;
810 #endif
811 for_each_cpu(i, span) {
812 int enqueue = 0;
813 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
814 struct rq *rq = rq_of_rt_rq(rt_rq);
815
816 raw_spin_lock(&rq->lock);
817 if (rt_rq->rt_time) {
818 u64 runtime;
819
820 raw_spin_lock(&rt_rq->rt_runtime_lock);
821 if (rt_rq->rt_throttled)
822 balance_runtime(rt_rq);
823 runtime = rt_rq->rt_runtime;
824 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
825 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
826 rt_rq->rt_throttled = 0;
827 enqueue = 1;
828
829 /*
830 * Force a clock update if the CPU was idle,
831 * lest wakeup -> unthrottle time accumulate.
832 */
833 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
834 rq->skip_clock_update = -1;
835 }
836 if (rt_rq->rt_time || rt_rq->rt_nr_running)
837 idle = 0;
838 raw_spin_unlock(&rt_rq->rt_runtime_lock);
839 } else if (rt_rq->rt_nr_running) {
840 idle = 0;
841 if (!rt_rq_throttled(rt_rq))
842 enqueue = 1;
843 }
844 if (rt_rq->rt_throttled)
845 throttled = 1;
846
847 if (enqueue)
848 sched_rt_rq_enqueue(rt_rq);
849 raw_spin_unlock(&rq->lock);
850 }
851
852 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
853 return 1;
854
855 return idle;
856 }
857
858 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
859 {
860 #ifdef CONFIG_RT_GROUP_SCHED
861 struct rt_rq *rt_rq = group_rt_rq(rt_se);
862
863 if (rt_rq)
864 return rt_rq->highest_prio.curr;
865 #endif
866
867 return rt_task_of(rt_se)->prio;
868 }
869
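/*
 * Decide whether this rt_rq has used up its bandwidth for the current
 * period. With the common defaults (assumed here) of
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000, RT tasks
 * may consume at most 950ms of every second on a CPU before the rt_rq is
 * throttled and dequeued until the period timer replenishes it.
 */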
870 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
871 {
872 u64 runtime = sched_rt_runtime(rt_rq);
873
874 if (rt_rq->rt_throttled)
875 return rt_rq_throttled(rt_rq);
876
877 if (runtime >= sched_rt_period(rt_rq))
878 return 0;
879
880 balance_runtime(rt_rq);
881 runtime = sched_rt_runtime(rt_rq);
882 if (runtime == RUNTIME_INF)
883 return 0;
884
885 if (rt_rq->rt_time > runtime) {
886 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
887
888 /*
889 * Don't actually throttle groups that have no runtime assigned
890 * but accrue some time due to boosting.
891 */
892 if (likely(rt_b->rt_runtime)) {
893 static bool once = false;
894
895 rt_rq->rt_throttled = 1;
896
897 if (!once) {
898 once = true;
899 printk_sched("sched: RT throttling activated\n");
900 }
901 } else {
902 /*
903 * In case we did anyway, make it go away,
904 * replenishment is a joke, since it will replenish us
905 * with exactly 0 ns.
906 */
907 rt_rq->rt_time = 0;
908 }
909
910 if (rt_rq_throttled(rt_rq)) {
911 sched_rt_rq_dequeue(rt_rq);
912 return 1;
913 }
914 }
915
916 return 0;
917 }
918
919 /*
920 * Update the current task's runtime statistics. Skip current tasks that
921 * are not in our scheduling class.
922 */
923 static void update_curr_rt(struct rq *rq)
924 {
925 struct task_struct *curr = rq->curr;
926 struct sched_rt_entity *rt_se = &curr->rt;
927 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
928 u64 delta_exec;
929
930 if (curr->sched_class != &rt_sched_class)
931 return;
932
933 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
934 if (unlikely((s64)delta_exec <= 0))
935 return;
936
937 schedstat_set(curr->se.statistics.exec_max,
938 max(curr->se.statistics.exec_max, delta_exec));
939
940 curr->se.sum_exec_runtime += delta_exec;
941 account_group_exec_runtime(curr, delta_exec);
942
943 curr->se.exec_start = rq_clock_task(rq);
944 cpuacct_charge(curr, delta_exec);
945
946 sched_rt_avg_update(rq, delta_exec);
947
948 if (!rt_bandwidth_enabled())
949 return;
950
951 for_each_sched_rt_entity(rt_se) {
952 rt_rq = rt_rq_of_se(rt_se);
953
954 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
955 raw_spin_lock(&rt_rq->rt_runtime_lock);
956 rt_rq->rt_time += delta_exec;
957 if (sched_rt_runtime_exceeded(rt_rq))
958 resched_task(curr);
959 raw_spin_unlock(&rt_rq->rt_runtime_lock);
960 }
961 }
962 }
963
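/*
 * {en,de}queue_top_rt_rq() add or remove the root rt_rq's contribution to
 * rq->nr_running as a whole. rt_rq->rt_queued records whether that
 * contribution is currently accounted, so throttling and unthrottling the
 * top-level queue keeps the global runnable count consistent.
 */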
964 static void
965 dequeue_top_rt_rq(struct rt_rq *rt_rq)
966 {
967 struct rq *rq = rq_of_rt_rq(rt_rq);
968
969 BUG_ON(&rq->rt != rt_rq);
970
971 if (!rt_rq->rt_queued)
972 return;
973
974 BUG_ON(!rq->nr_running);
975
976 sub_nr_running(rq, rt_rq->rt_nr_running);
977 rt_rq->rt_queued = 0;
978 }
979
980 static void
981 enqueue_top_rt_rq(struct rt_rq *rt_rq)
982 {
983 struct rq *rq = rq_of_rt_rq(rt_rq);
984
985 BUG_ON(&rq->rt != rt_rq);
986
987 if (rt_rq->rt_queued)
988 return;
989 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
990 return;
991
992 add_nr_running(rq, rt_rq->rt_nr_running);
993 rt_rq->rt_queued = 1;
994 }
995
996 #if defined CONFIG_SMP
997
998 static void
999 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1000 {
1001 struct rq *rq = rq_of_rt_rq(rt_rq);
1002
1003 #ifdef CONFIG_RT_GROUP_SCHED
1004 /*
1005 * Change rq's cpupri only if rt_rq is the top queue.
1006 */
1007 if (&rq->rt != rt_rq)
1008 return;
1009 #endif
1010 if (rq->online && prio < prev_prio)
1011 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1012 }
1013
1014 static void
1015 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1016 {
1017 struct rq *rq = rq_of_rt_rq(rt_rq);
1018
1019 #ifdef CONFIG_RT_GROUP_SCHED
1020 /*
1021 * Change rq's cpupri only if rt_rq is the top queue.
1022 */
1023 if (&rq->rt != rt_rq)
1024 return;
1025 #endif
1026 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1027 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1028 }
1029
1030 #else /* CONFIG_SMP */
1031
1032 static inline
1033 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1034 static inline
1035 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1036
1037 #endif /* CONFIG_SMP */
1038
1039 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1040 static void
1041 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1042 {
1043 int prev_prio = rt_rq->highest_prio.curr;
1044
1045 if (prio < prev_prio)
1046 rt_rq->highest_prio.curr = prio;
1047
1048 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1049 }
1050
1051 static void
1052 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1053 {
1054 int prev_prio = rt_rq->highest_prio.curr;
1055
1056 if (rt_rq->rt_nr_running) {
1057
1058 WARN_ON(prio < prev_prio);
1059
1060 /*
1061 * This may have been our highest task, and therefore
1062 * we may have some recomputation to do
1063 */
1064 if (prio == prev_prio) {
1065 struct rt_prio_array *array = &rt_rq->active;
1066
1067 rt_rq->highest_prio.curr =
1068 sched_find_first_bit(array->bitmap);
1069 }
1070
1071 } else
1072 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1073
1074 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1075 }
1076
1077 #else
1078
1079 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1080 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1081
1082 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1083
1084 #ifdef CONFIG_RT_GROUP_SCHED
1085
1086 static void
1087 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1088 {
1089 if (rt_se_boosted(rt_se))
1090 rt_rq->rt_nr_boosted++;
1091
1092 if (rt_rq->tg)
1093 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1094 }
1095
1096 static void
1097 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1098 {
1099 if (rt_se_boosted(rt_se))
1100 rt_rq->rt_nr_boosted--;
1101
1102 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1103 }
1104
1105 #else /* CONFIG_RT_GROUP_SCHED */
1106
1107 static void
1108 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1109 {
1110 start_rt_bandwidth(&def_rt_bandwidth);
1111 }
1112
1113 static inline
1114 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1115
1116 #endif /* CONFIG_RT_GROUP_SCHED */
1117
1118 static inline
1119 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1120 {
1121 struct rt_rq *group_rq = group_rt_rq(rt_se);
1122
1123 if (group_rq)
1124 return group_rq->rt_nr_running;
1125 else
1126 return 1;
1127 }
1128
1129 static inline
1130 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1131 {
1132 int prio = rt_se_prio(rt_se);
1133
1134 WARN_ON(!rt_prio(prio));
1135 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1136
1137 inc_rt_prio(rt_rq, prio);
1138 inc_rt_migration(rt_se, rt_rq);
1139 inc_rt_group(rt_se, rt_rq);
1140 }
1141
1142 static inline
1143 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1144 {
1145 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1146 WARN_ON(!rt_rq->rt_nr_running);
1147 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1148
1149 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1150 dec_rt_migration(rt_se, rt_rq);
1151 dec_rt_group(rt_se, rt_rq);
1152 }
1153
1154 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1155 {
1156 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1157 struct rt_prio_array *array = &rt_rq->active;
1158 struct rt_rq *group_rq = group_rt_rq(rt_se);
1159 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1160
1161 /*
1162 * Don't enqueue the group if it is throttled, or when it is empty.
1163 * The latter is a consequence of the former when a child group
1164 * gets throttled and the current group doesn't have any other
1165 * active members.
1166 */
1167 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1168 return;
1169
1170 if (head)
1171 list_add(&rt_se->run_list, queue);
1172 else
1173 list_add_tail(&rt_se->run_list, queue);
1174 __set_bit(rt_se_prio(rt_se), array->bitmap);
1175
1176 inc_rt_tasks(rt_se, rt_rq);
1177 }
1178
1179 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1180 {
1181 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1182 struct rt_prio_array *array = &rt_rq->active;
1183
1184 list_del_init(&rt_se->run_list);
1185 if (list_empty(array->queue + rt_se_prio(rt_se)))
1186 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1187
1188 dec_rt_tasks(rt_se, rt_rq);
1189 }
1190
1191 /*
1192 * Because the prio of an upper entry depends on the lower
1193 * entries, we must remove entries top-down.
1194 */
1195 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1196 {
1197 struct sched_rt_entity *back = NULL;
1198
1199 for_each_sched_rt_entity(rt_se) {
1200 rt_se->back = back;
1201 back = rt_se;
1202 }
1203
1204 dequeue_top_rt_rq(rt_rq_of_se(back));
1205
1206 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1207 if (on_rt_rq(rt_se))
1208 __dequeue_rt_entity(rt_se);
1209 }
1210 }
1211
1212 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1213 {
1214 struct rq *rq = rq_of_rt_se(rt_se);
1215
1216 dequeue_rt_stack(rt_se);
1217 for_each_sched_rt_entity(rt_se)
1218 __enqueue_rt_entity(rt_se, head);
1219 enqueue_top_rt_rq(&rq->rt);
1220 }
1221
1222 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1223 {
1224 struct rq *rq = rq_of_rt_se(rt_se);
1225
1226 dequeue_rt_stack(rt_se);
1227
1228 for_each_sched_rt_entity(rt_se) {
1229 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1230
1231 if (rt_rq && rt_rq->rt_nr_running)
1232 __enqueue_rt_entity(rt_se, false);
1233 }
1234 enqueue_top_rt_rq(&rq->rt);
1235 }
1236
1237 /*
1238 * Adding/removing a task to/from a priority array:
1239 */
1240 static void
1241 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1242 {
1243 struct sched_rt_entity *rt_se = &p->rt;
1244
1245 if (flags & ENQUEUE_WAKEUP)
1246 rt_se->timeout = 0;
1247
1248 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1249
1250 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1251 enqueue_pushable_task(rq, p);
1252 }
1253
1254 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1255 {
1256 struct sched_rt_entity *rt_se = &p->rt;
1257
1258 update_curr_rt(rq);
1259 dequeue_rt_entity(rt_se);
1260
1261 dequeue_pushable_task(rq, p);
1262 }
1263
1264 /*
1265 * Put task to the head or the end of the run list without the overhead of
1266 * dequeue followed by enqueue.
1267 */
1268 static void
1269 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1270 {
1271 if (on_rt_rq(rt_se)) {
1272 struct rt_prio_array *array = &rt_rq->active;
1273 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1274
1275 if (head)
1276 list_move(&rt_se->run_list, queue);
1277 else
1278 list_move_tail(&rt_se->run_list, queue);
1279 }
1280 }
1281
1282 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1283 {
1284 struct sched_rt_entity *rt_se = &p->rt;
1285 struct rt_rq *rt_rq;
1286
1287 for_each_sched_rt_entity(rt_se) {
1288 rt_rq = rt_rq_of_se(rt_se);
1289 requeue_rt_entity(rt_rq, rt_se, head);
1290 }
1291 }
1292
1293 static void yield_task_rt(struct rq *rq)
1294 {
1295 requeue_task_rt(rq, rq->curr, 0);
1296 }
1297
1298 #ifdef CONFIG_SMP
1299 static int find_lowest_rq(struct task_struct *task);
1300
1301 static int
1302 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1303 {
1304 struct task_struct *curr;
1305 struct rq *rq;
1306
1307 if (p->nr_cpus_allowed == 1)
1308 goto out;
1309
1310 /* For anything but wake ups, just return the task_cpu */
1311 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1312 goto out;
1313
1314 rq = cpu_rq(cpu);
1315
1316 rcu_read_lock();
1317 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1318
1319 /*
1320 * If the current task on @p's runqueue is an RT task, then
1321 * try to see if we can wake this RT task up on another
1322 * runqueue. Otherwise simply start this RT task
1323 * on its current runqueue.
1324 *
1325 * We want to avoid overloading runqueues. If the woken
1326 * task is a higher priority, then it will stay on this CPU
1327 * and the lower prio task should be moved to another CPU.
1328 * Even though this will probably make the lower prio task
1329 * lose its cache, we do not want to bounce a higher task
1330 * around just because it gave up its CPU, perhaps for a
1331 * lock?
1332 *
1333 * For equal prio tasks, we just let the scheduler sort it out.
1334 *
1335 * Otherwise, just let it ride on the affined RQ and the
1336 * post-schedule router will push the preempted task away
1337 *
1338 * This test is optimistic; if we get it wrong the load-balancer
1339 * will have to sort it out.
1340 */
1341 if (curr && unlikely(rt_task(curr)) &&
1342 (curr->nr_cpus_allowed < 2 ||
1343 curr->prio <= p->prio)) {
1344 int target = find_lowest_rq(p);
1345
1346 if (target != -1)
1347 cpu = target;
1348 }
1349 rcu_read_unlock();
1350
1351 out:
1352 return cpu;
1353 }
1354
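/*
 * A task of equal priority just woke up here. If the woken task cannot
 * migrate (or has nowhere better to go) while current could run elsewhere,
 * requeue the woken task at the head and reschedule so the push logic gets
 * a chance to move current to another CPU.
 */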
1355 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1356 {
1357 if (rq->curr->nr_cpus_allowed == 1)
1358 return;
1359
1360 if (p->nr_cpus_allowed != 1
1361 && cpupri_find(&rq->rd->cpupri, p, NULL))
1362 return;
1363
1364 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1365 return;
1366
1367 /*
1368 * There appear to be other CPUs that can accept
1369 * current and none to run 'p', so let's reschedule
1370 * to try and push current away:
1371 */
1372 requeue_task_rt(rq, p, 1);
1373 resched_task(rq->curr);
1374 }
1375
1376 #endif /* CONFIG_SMP */
1377
1378 /*
1379 * Preempt the current task with a newly woken task if needed:
1380 */
1381 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1382 {
1383 if (p->prio < rq->curr->prio) {
1384 resched_task(rq->curr);
1385 return;
1386 }
1387
1388 #ifdef CONFIG_SMP
1389 /*
1390 * If:
1391 *
1392 * - the newly woken task is of equal priority to the current task
1393 * - the newly woken task is non-migratable while current is migratable
1394 * - current will be preempted on the next reschedule
1395 *
1396 * we should check to see if current can readily move to a different
1397 * cpu. If so, we will reschedule to allow the push logic to try
1398 * to move current somewhere else, making room for our non-migratable
1399 * task.
1400 */
1401 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1402 check_preempt_equal_prio(rq, p);
1403 #endif
1404 }
1405
1406 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1407 struct rt_rq *rt_rq)
1408 {
1409 struct rt_prio_array *array = &rt_rq->active;
1410 struct sched_rt_entity *next = NULL;
1411 struct list_head *queue;
1412 int idx;
1413
1414 idx = sched_find_first_bit(array->bitmap);
1415 BUG_ON(idx >= MAX_RT_PRIO);
1416
1417 queue = array->queue + idx;
1418 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1419
1420 return next;
1421 }
1422
1423 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1424 {
1425 struct sched_rt_entity *rt_se;
1426 struct task_struct *p;
1427 struct rt_rq *rt_rq = &rq->rt;
1428
1429 do {
1430 rt_se = pick_next_rt_entity(rq, rt_rq);
1431 BUG_ON(!rt_se);
1432 rt_rq = group_rt_rq(rt_se);
1433 } while (rt_rq);
1434
1435 p = rt_task_of(rt_se);
1436 p->se.exec_start = rq_clock_task(rq);
1437
1438 return p;
1439 }
1440
1441 static struct task_struct *
1442 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1443 {
1444 struct task_struct *p;
1445 struct rt_rq *rt_rq = &rq->rt;
1446
1447 if (need_pull_rt_task(rq, prev)) {
1448 pull_rt_task(rq);
1449 /*
1450 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1451 * means a dl or stop task can slip in, in which case we need
1452 * to re-start task selection.
1453 */
1454 if (unlikely((rq->stop && rq->stop->on_rq) ||
1455 rq->dl.dl_nr_running))
1456 return RETRY_TASK;
1457 }
1458
1459 /*
1460 * We may dequeue prev's rt_rq in put_prev_task().
1461 * So, we update time before rt_nr_running check.
1462 */
1463 if (prev->sched_class == &rt_sched_class)
1464 update_curr_rt(rq);
1465
1466 if (!rt_rq->rt_queued)
1467 return NULL;
1468
1469 put_prev_task(rq, prev);
1470
1471 p = _pick_next_task_rt(rq);
1472
1473 /* The running task is never eligible for pushing */
1474 if (p)
1475 dequeue_pushable_task(rq, p);
1476
1477 set_post_schedule(rq);
1478
1479 return p;
1480 }
1481
1482 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1483 {
1484 update_curr_rt(rq);
1485
1486 /*
1487 * The previous task needs to be made eligible for pushing
1488 * if it is still active
1489 */
1490 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1491 enqueue_pushable_task(rq, p);
1492 }
1493
1494 #ifdef CONFIG_SMP
1495
1496 /* Only try algorithms three times */
1497 #define RT_MAX_TRIES 3
1498
1499 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1500 {
1501 if (!task_running(rq, p) &&
1502 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1503 return 1;
1504 return 0;
1505 }
1506
1507 /*
1508 * Return the highest-priority pushable task on this rq that can run
1509 * on the given CPU, or NULL if there is none.
1510 */
1511 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1512 {
1513 struct plist_head *head = &rq->rt.pushable_tasks;
1514 struct task_struct *p;
1515
1516 if (!has_pushable_tasks(rq))
1517 return NULL;
1518
1519 plist_for_each_entry(p, head, pushable_tasks) {
1520 if (pick_rt_task(rq, p, cpu))
1521 return p;
1522 }
1523
1524 return NULL;
1525 }
1526
1527 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1528
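/*
 * Pick a CPU for @task, in decreasing order of preference:
 *  1) cpupri gives us the mask of CPUs running the lowest-priority work;
 *  2) prefer the task's previous CPU if it is in that mask (cache-hot);
 *  3) otherwise walk the sched domains and prefer this_cpu, or the first
 *     matching CPU that is topologically close;
 *  4) finally fall back to any CPU in the mask, or -1 if none qualify.
 */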
1529 static int find_lowest_rq(struct task_struct *task)
1530 {
1531 struct sched_domain *sd;
1532 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1533 int this_cpu = smp_processor_id();
1534 int cpu = task_cpu(task);
1535
1536 /* Make sure the mask is initialized first */
1537 if (unlikely(!lowest_mask))
1538 return -1;
1539
1540 if (task->nr_cpus_allowed == 1)
1541 return -1; /* No other targets possible */
1542
1543 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1544 return -1; /* No targets found */
1545
1546 /*
1547 * At this point we have built a mask of cpus representing the
1548 * lowest priority tasks in the system. Now we want to elect
1549 * the best one based on our affinity and topology.
1550 *
1551 * We prioritize the last cpu that the task executed on since
1552 * it is most likely cache-hot in that location.
1553 */
1554 if (cpumask_test_cpu(cpu, lowest_mask))
1555 return cpu;
1556
1557 /*
1558 * Otherwise, we consult the sched_domains span maps to figure
1559 * out which cpu is logically closest to our hot cache data.
1560 */
1561 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1562 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1563
1564 rcu_read_lock();
1565 for_each_domain(cpu, sd) {
1566 if (sd->flags & SD_WAKE_AFFINE) {
1567 int best_cpu;
1568
1569 /*
1570 * "this_cpu" is cheaper to preempt than a
1571 * remote processor.
1572 */
1573 if (this_cpu != -1 &&
1574 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1575 rcu_read_unlock();
1576 return this_cpu;
1577 }
1578
1579 best_cpu = cpumask_first_and(lowest_mask,
1580 sched_domain_span(sd));
1581 if (best_cpu < nr_cpu_ids) {
1582 rcu_read_unlock();
1583 return best_cpu;
1584 }
1585 }
1586 }
1587 rcu_read_unlock();
1588
1589 /*
1590 * And finally, if there were no matches within the domains
1591 * just give the caller *something* to work with from the compatible
1592 * locations.
1593 */
1594 if (this_cpu != -1)
1595 return this_cpu;
1596
1597 cpu = cpumask_any(lowest_mask);
1598 if (cpu < nr_cpu_ids)
1599 return cpu;
1600 return -1;
1601 }
1602
1603 /* Will lock the rq it finds */
1604 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1605 {
1606 struct rq *lowest_rq = NULL;
1607 int tries;
1608 int cpu;
1609
1610 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1611 cpu = find_lowest_rq(task);
1612
1613 if ((cpu == -1) || (cpu == rq->cpu))
1614 break;
1615
1616 lowest_rq = cpu_rq(cpu);
1617
1618 /* if the prio of this runqueue changed, try again */
1619 if (double_lock_balance(rq, lowest_rq)) {
1620 /*
1621 * We had to unlock the run queue. In
1622 * the mean time, task could have
1623 * migrated already or had its affinity changed.
1624 * Also make sure that it wasn't scheduled on its rq.
1625 */
1626 if (unlikely(task_rq(task) != rq ||
1627 !cpumask_test_cpu(lowest_rq->cpu,
1628 tsk_cpus_allowed(task)) ||
1629 task_running(rq, task) ||
1630 !task->on_rq)) {
1631
1632 double_unlock_balance(rq, lowest_rq);
1633 lowest_rq = NULL;
1634 break;
1635 }
1636 }
1637
1638 /* If this rq is still suitable use it. */
1639 if (lowest_rq->rt.highest_prio.curr > task->prio)
1640 break;
1641
1642 /* try again */
1643 double_unlock_balance(rq, lowest_rq);
1644 lowest_rq = NULL;
1645 }
1646
1647 return lowest_rq;
1648 }
1649
1650 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1651 {
1652 struct task_struct *p;
1653
1654 if (!has_pushable_tasks(rq))
1655 return NULL;
1656
1657 p = plist_first_entry(&rq->rt.pushable_tasks,
1658 struct task_struct, pushable_tasks);
1659
1660 BUG_ON(rq->cpu != task_cpu(p));
1661 BUG_ON(task_current(rq, p));
1662 BUG_ON(p->nr_cpus_allowed <= 1);
1663
1664 BUG_ON(!p->on_rq);
1665 BUG_ON(!rt_task(p));
1666
1667 return p;
1668 }
1669
1670 /*
1671 * If the current CPU has more than one RT task, see if the
1672 * non-running task can migrate over to a CPU that is running a task
1673 * of lesser priority.
1674 */
1675 static int push_rt_task(struct rq *rq)
1676 {
1677 struct task_struct *next_task;
1678 struct rq *lowest_rq;
1679 int ret = 0;
1680
1681 if (!rq->rt.overloaded)
1682 return 0;
1683
1684 next_task = pick_next_pushable_task(rq);
1685 if (!next_task)
1686 return 0;
1687
1688 retry:
1689 if (unlikely(next_task == rq->curr)) {
1690 WARN_ON(1);
1691 return 0;
1692 }
1693
1694 /*
1695 * It's possible that the next_task slipped in with a
1696 * higher priority than current. If that's the case,
1697 * just reschedule current.
1698 */
1699 if (unlikely(next_task->prio < rq->curr->prio)) {
1700 resched_task(rq->curr);
1701 return 0;
1702 }
1703
1704 /* We might release rq lock */
1705 get_task_struct(next_task);
1706
1707 /* find_lock_lowest_rq locks the rq if found */
1708 lowest_rq = find_lock_lowest_rq(next_task, rq);
1709 if (!lowest_rq) {
1710 struct task_struct *task;
1711 /*
1712 * find_lock_lowest_rq releases rq->lock
1713 * so it is possible that next_task has migrated.
1714 *
1715 * We need to make sure that the task is still on the same
1716 * run-queue and is also still the next task eligible for
1717 * pushing.
1718 */
1719 task = pick_next_pushable_task(rq);
1720 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1721 /*
1722 * The task hasn't migrated, and is still the next
1723 * eligible task, but we failed to find a run-queue
1724 * to push it to. Do not retry in this case, since
1725 * other cpus will pull from us when ready.
1726 */
1727 goto out;
1728 }
1729
1730 if (!task)
1731 /* No more tasks, just exit */
1732 goto out;
1733
1734 /*
1735 * Something has shifted, try again.
1736 */
1737 put_task_struct(next_task);
1738 next_task = task;
1739 goto retry;
1740 }
1741
1742 deactivate_task(rq, next_task, 0);
1743 set_task_cpu(next_task, lowest_rq->cpu);
1744 activate_task(lowest_rq, next_task, 0);
1745 ret = 1;
1746
1747 resched_task(lowest_rq->curr);
1748
1749 double_unlock_balance(rq, lowest_rq);
1750
1751 out:
1752 put_task_struct(next_task);
1753
1754 return ret;
1755 }
1756
1757 static void push_rt_tasks(struct rq *rq)
1758 {
1759 /* push_rt_task will return true if it moved an RT */
1760 while (push_rt_task(rq))
1761 ;
1762 }
1763
1764 static int pull_rt_task(struct rq *this_rq)
1765 {
1766 int this_cpu = this_rq->cpu, ret = 0, cpu;
1767 struct task_struct *p;
1768 struct rq *src_rq;
1769
1770 if (likely(!rt_overloaded(this_rq)))
1771 return 0;
1772
1773 /*
1774 * Match the barrier from rt_set_overload(); this guarantees that if we
1775 * see overloaded we must also see the rto_mask bit.
1776 */
1777 smp_rmb();
1778
1779 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1780 if (this_cpu == cpu)
1781 continue;
1782
1783 src_rq = cpu_rq(cpu);
1784
1785 /*
1786 * Don't bother taking the src_rq->lock if the next highest
1787 * task is known to be lower-priority than our current task.
1788 * This may look racy, but if this value is about to go
1789 * logically higher, the src_rq will push this task away.
1790 * And if it's going logically lower, we do not care.
1791 */
1792 if (src_rq->rt.highest_prio.next >=
1793 this_rq->rt.highest_prio.curr)
1794 continue;
1795
1796 /*
1797 * We can potentially drop this_rq's lock in
1798 * double_lock_balance, and another CPU could
1799 * alter this_rq
1800 */
1801 double_lock_balance(this_rq, src_rq);
1802
1803 /*
1804 * We can only pull a task that is pushable
1805 * on its rq, and no others.
1806 */
1807 p = pick_highest_pushable_task(src_rq, this_cpu);
1808
1809 /*
1810 * Do we have an RT task that preempts
1811 * the to-be-scheduled task?
1812 */
1813 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1814 WARN_ON(p == src_rq->curr);
1815 WARN_ON(!p->on_rq);
1816
1817 /*
1818 * There's a chance that p is higher in priority
1819 * than what's currently running on its cpu.
1820 * This is just because p is waking up and hasn't
1821 * had a chance to schedule. We only pull
1822 * p if it is lower in priority than the
1823 * current task on the run queue
1824 */
1825 if (p->prio < src_rq->curr->prio)
1826 goto skip;
1827
1828 ret = 1;
1829
1830 deactivate_task(src_rq, p, 0);
1831 set_task_cpu(p, this_cpu);
1832 activate_task(this_rq, p, 0);
1833 /*
1834 * We continue with the search, just in
1835 * case there's an even higher prio task
1836 * in another runqueue. (low likelihood
1837 * but possible)
1838 */
1839 }
1840 skip:
1841 double_unlock_balance(this_rq, src_rq);
1842 }
1843
1844 return ret;
1845 }
1846
1847 static void post_schedule_rt(struct rq *rq)
1848 {
1849 push_rt_tasks(rq);
1850 }
1851
1852 /*
1853 * If we are not running and we are not going to reschedule soon, we should
1854 * try to push tasks away now
1855 */
1856 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1857 {
1858 if (!task_running(rq, p) &&
1859 !test_tsk_need_resched(rq->curr) &&
1860 has_pushable_tasks(rq) &&
1861 p->nr_cpus_allowed > 1 &&
1862 (dl_task(rq->curr) || rt_task(rq->curr)) &&
1863 (rq->curr->nr_cpus_allowed < 2 ||
1864 rq->curr->prio <= p->prio))
1865 push_rt_tasks(rq);
1866 }
1867
1868 static void set_cpus_allowed_rt(struct task_struct *p,
1869 const struct cpumask *new_mask)
1870 {
1871 struct rq *rq;
1872 int weight;
1873
1874 BUG_ON(!rt_task(p));
1875
1876 if (!p->on_rq)
1877 return;
1878
1879 weight = cpumask_weight(new_mask);
1880
1881 /*
1882 * Only update if the task's migratability actually changes, i.e. it
1883 * goes from being able to migrate to being pinned, or vice versa.
1884 */
1885 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1886 return;
1887
1888 rq = task_rq(p);
1889
1890 /*
1891 * The process used to be able to migrate OR it can now migrate
1892 */
1893 if (weight <= 1) {
1894 if (!task_current(rq, p))
1895 dequeue_pushable_task(rq, p);
1896 BUG_ON(!rq->rt.rt_nr_migratory);
1897 rq->rt.rt_nr_migratory--;
1898 } else {
1899 if (!task_current(rq, p))
1900 enqueue_pushable_task(rq, p);
1901 rq->rt.rt_nr_migratory++;
1902 }
1903
1904 update_rt_migration(&rq->rt);
1905 }
1906
1907 /* Assumes rq->lock is held */
1908 static void rq_online_rt(struct rq *rq)
1909 {
1910 if (rq->rt.overloaded)
1911 rt_set_overload(rq);
1912
1913 __enable_runtime(rq);
1914
1915 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1916 }
1917
1918 /* Assumes rq->lock is held */
1919 static void rq_offline_rt(struct rq *rq)
1920 {
1921 if (rq->rt.overloaded)
1922 rt_clear_overload(rq);
1923
1924 __disable_runtime(rq);
1925
1926 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1927 }
1928
1929 /*
1930 * When switching away from the rt queue, we bring ourselves to a position
1931 * where we might want to pull RT tasks from other runqueues.
1932 */
1933 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1934 {
1935 /*
1936 * If there are other RT tasks then we will reschedule
1937 * and the scheduling of the other RT tasks will handle
1938 * the balancing. But if we are the last RT task
1939 * we may need to handle the pulling of RT tasks
1940 * now.
1941 */
1942 if (!p->on_rq || rq->rt.rt_nr_running)
1943 return;
1944
1945 if (pull_rt_task(rq))
1946 resched_task(rq->curr);
1947 }
1948
1949 void __init init_sched_rt_class(void)
1950 {
1951 unsigned int i;
1952
1953 for_each_possible_cpu(i) {
1954 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1955 GFP_KERNEL, cpu_to_node(i));
1956 }
1957 }
1958 #endif /* CONFIG_SMP */
1959
1960 /*
1961 * When switching a task to RT, we may overload the runqueue
1962 * with RT tasks. In this case we try to push them off to
1963 * other runqueues.
1964 */
1965 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1966 {
1967 int check_resched = 1;
1968
1969 /*
1970 * If we are already running, then there's nothing
1971 * that needs to be done. But if we are not running
1972 * we may need to preempt the current running task.
1973 * If that current running task is also an RT task
1974 * then see if we can move to another run queue.
1975 */
1976 if (p->on_rq && rq->curr != p) {
1977 #ifdef CONFIG_SMP
1978 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
1979 /* Don't resched if we changed runqueues */
1980 push_rt_task(rq) && rq != task_rq(p))
1981 check_resched = 0;
1982 #endif /* CONFIG_SMP */
1983 if (check_resched && p->prio < rq->curr->prio)
1984 resched_task(rq->curr);
1985 }
1986 }
1987
1988 /*
1989 * Priority of the task has changed. This may cause
1990 * us to initiate a push or pull.
1991 */
1992 static void
1993 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1994 {
1995 if (!p->on_rq)
1996 return;
1997
1998 if (rq->curr == p) {
1999 #ifdef CONFIG_SMP
2000 /*
2001 * If our priority decreases while running, we
2002 * may need to pull tasks to this runqueue.
2003 */
2004 if (oldprio < p->prio)
2005 pull_rt_task(rq);
2006 /*
2007 * If there's a higher priority task waiting to run
2008 * then reschedule. Note, the above pull_rt_task
2009 * can release the rq lock and p could migrate.
2010 * Only reschedule if p is still on the same runqueue.
2011 */
2012 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
2013 resched_task(p);
2014 #else
2015 /* For UP simply resched on drop of prio */
2016 if (oldprio < p->prio)
2017 resched_task(p);
2018 #endif /* CONFIG_SMP */
2019 } else {
2020 /*
2021 * This task is not running, but if its priority is
2022 * higher than that of the currently running task,
2023 * then reschedule.
2024 */
2025 if (p->prio < rq->curr->prio)
2026 resched_task(rq->curr);
2027 }
2028 }
2029
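/*
 * Enforce RLIMIT_RTTIME: count how many ticks this task has run without
 * sleeping and, once that exceeds the rlimit, arm a cputime expiry so the
 * posix-cpu-timer code notices and signals the task (SIGXCPU at the soft
 * limit, escalating at the hard limit; the delivery itself happens
 * elsewhere).
 */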
2030 static void watchdog(struct rq *rq, struct task_struct *p)
2031 {
2032 unsigned long soft, hard;
2033
2034 /* max may change after cur was read, this will be fixed next tick */
2035 soft = task_rlimit(p, RLIMIT_RTTIME);
2036 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2037
2038 if (soft != RLIM_INFINITY) {
2039 unsigned long next;
2040
2041 if (p->rt.watchdog_stamp != jiffies) {
2042 p->rt.timeout++;
2043 p->rt.watchdog_stamp = jiffies;
2044 }
2045
2046 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2047 if (p->rt.timeout > next)
2048 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2049 }
2050 }
2051
2052 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2053 {
2054 struct sched_rt_entity *rt_se = &p->rt;
2055
2056 update_curr_rt(rq);
2057
2058 watchdog(rq, p);
2059
2060 /*
2061 * RR tasks need a special form of timeslice management.
2062 * FIFO tasks have no timeslices.
2063 */
2064 if (p->policy != SCHED_RR)
2065 return;
2066
2067 if (--p->rt.time_slice)
2068 return;
2069
2070 p->rt.time_slice = sched_rr_timeslice;
2071
2072 /*
2073 * Requeue to the end of the queue if we (and all of our ancestors) are not
2074 * the only element on the queue
2075 */
2076 for_each_sched_rt_entity(rt_se) {
2077 if (rt_se->run_list.prev != rt_se->run_list.next) {
2078 requeue_task_rt(rq, p, 0);
2079 set_tsk_need_resched(p);
2080 return;
2081 }
2082 }
2083 }
2084
2085 static void set_curr_task_rt(struct rq *rq)
2086 {
2087 struct task_struct *p = rq->curr;
2088
2089 p->se.exec_start = rq_clock_task(rq);
2090
2091 /* The running task is never eligible for pushing */
2092 dequeue_pushable_task(rq, p);
2093 }
2094
2095 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2096 {
2097 /*
2098 * Time slice is 0 for SCHED_FIFO tasks
2099 */
2100 if (task->policy == SCHED_RR)
2101 return sched_rr_timeslice;
2102 else
2103 return 0;
2104 }
2105
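/*
 * Method table that hooks the SCHED_FIFO/SCHED_RR policies into the core
 * scheduler. .next chains to fair_sched_class, so the core falls through
 * to CFS (and below) whenever no RT task is runnable.
 */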
2106 const struct sched_class rt_sched_class = {
2107 .next = &fair_sched_class,
2108 .enqueue_task = enqueue_task_rt,
2109 .dequeue_task = dequeue_task_rt,
2110 .yield_task = yield_task_rt,
2111
2112 .check_preempt_curr = check_preempt_curr_rt,
2113
2114 .pick_next_task = pick_next_task_rt,
2115 .put_prev_task = put_prev_task_rt,
2116
2117 #ifdef CONFIG_SMP
2118 .select_task_rq = select_task_rq_rt,
2119
2120 .set_cpus_allowed = set_cpus_allowed_rt,
2121 .rq_online = rq_online_rt,
2122 .rq_offline = rq_offline_rt,
2123 .post_schedule = post_schedule_rt,
2124 .task_woken = task_woken_rt,
2125 .switched_from = switched_from_rt,
2126 #endif
2127
2128 .set_curr_task = set_curr_task_rt,
2129 .task_tick = task_tick_rt,
2130
2131 .get_rr_interval = get_rr_interval_rt,
2132
2133 .prio_changed = prio_changed_rt,
2134 .switched_to = switched_to_rt,
2135 };
2136
2137 #ifdef CONFIG_SCHED_DEBUG
2138 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2139
2140 void print_rt_stats(struct seq_file *m, int cpu)
2141 {
2142 rt_rq_iter_t iter;
2143 struct rt_rq *rt_rq;
2144
2145 rcu_read_lock();
2146 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2147 print_rt_rq(m, cpu, rt_rq);
2148 rcu_read_unlock();
2149 }
2150 #endif /* CONFIG_SCHED_DEBUG */