kernel/sched/rt.c
1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9 #include <linux/irq_work.h>
10
11 int sched_rr_timeslice = RR_TIMESLICE;
12
13 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
14
15 struct rt_bandwidth def_rt_bandwidth;
16
17 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
18 {
19 struct rt_bandwidth *rt_b =
20 container_of(timer, struct rt_bandwidth, rt_period_timer);
21 int idle = 0;
22 int overrun;
23
24 raw_spin_lock(&rt_b->rt_runtime_lock);
25 for (;;) {
26 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
27 if (!overrun)
28 break;
29
30 raw_spin_unlock(&rt_b->rt_runtime_lock);
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 raw_spin_lock(&rt_b->rt_runtime_lock);
33 }
34 if (idle)
35 rt_b->rt_period_active = 0;
36 raw_spin_unlock(&rt_b->rt_runtime_lock);
37
38 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
39 }
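/*
 * Illustrative sketch (not part of the kernel source): the loop above
 * relies on hrtimer_forward_now() reporting how many whole periods the
 * timer overran, so one callback can replenish several missed periods at
 * once. A minimal userspace analogue, assuming a monotonic clock in
 * nanoseconds:
 */
#include <stdint.h>

/* Advance *expires past now in steps of period; return missed periods. */
static uint64_t forward_now(uint64_t *expires, uint64_t now, uint64_t period)
{
	uint64_t overrun = 0;

	while (*expires <= now) {
		*expires += period;
		overrun++;
	}
	return overrun;
}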
40
41 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
42 {
43 rt_b->rt_period = ns_to_ktime(period);
44 rt_b->rt_runtime = runtime;
45
46 raw_spin_lock_init(&rt_b->rt_runtime_lock);
47
48 hrtimer_init(&rt_b->rt_period_timer,
49 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
50 rt_b->rt_period_timer.function = sched_rt_period_timer;
51 }
52
53 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
54 {
55 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
56 return;
57
58 raw_spin_lock(&rt_b->rt_runtime_lock);
59 if (!rt_b->rt_period_active) {
60 rt_b->rt_period_active = 1;
61 hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
62 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
63 }
64 raw_spin_unlock(&rt_b->rt_runtime_lock);
65 }
66
67 #if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
68 static void push_irq_work_func(struct irq_work *work);
69 #endif
70
71 void init_rt_rq(struct rt_rq *rt_rq)
72 {
73 struct rt_prio_array *array;
74 int i;
75
76 array = &rt_rq->active;
77 for (i = 0; i < MAX_RT_PRIO; i++) {
78 INIT_LIST_HEAD(array->queue + i);
79 __clear_bit(i, array->bitmap);
80 }
81 /* delimiter for bitsearch: */
82 __set_bit(MAX_RT_PRIO, array->bitmap);
83
84 #if defined CONFIG_SMP
85 rt_rq->highest_prio.curr = MAX_RT_PRIO;
86 rt_rq->highest_prio.next = MAX_RT_PRIO;
87 rt_rq->rt_nr_migratory = 0;
88 rt_rq->overloaded = 0;
89 plist_head_init(&rt_rq->pushable_tasks);
90
91 #ifdef HAVE_RT_PUSH_IPI
92 rt_rq->push_flags = 0;
93 rt_rq->push_cpu = nr_cpu_ids;
94 raw_spin_lock_init(&rt_rq->push_lock);
95 init_irq_work(&rt_rq->push_work, push_irq_work_func);
96 #endif
97 #endif /* CONFIG_SMP */
98 /* We start in dequeued state, because no RT tasks are queued */
99 rt_rq->rt_queued = 0;
100
101 rt_rq->rt_time = 0;
102 rt_rq->rt_throttled = 0;
103 rt_rq->rt_runtime = 0;
104 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
105 }
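/*
 * Illustrative sketch (not kernel code): the MAX_RT_PRIO delimiter bit
 * set above guarantees that a find-first-set scan over the bitmap always
 * terminates, even when every priority queue is empty. A plain-C
 * analogue of what sched_find_first_bit() does, with NR_PRIO standing in
 * for MAX_RT_PRIO:
 */
#define NR_PRIO 100	/* illustrative stand-in for MAX_RT_PRIO */

static int find_first_prio(const unsigned long *bitmap)
{
	int bits = 8 * sizeof(unsigned long);
	int i;

	for (i = 0; i <= NR_PRIO; i++)
		if (bitmap[i / bits] & (1UL << (i % bits)))
			return i;
	return NR_PRIO;	/* not reached: the delimiter bit stops the scan */
}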
106
107 #ifdef CONFIG_RT_GROUP_SCHED
108 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
109 {
110 hrtimer_cancel(&rt_b->rt_period_timer);
111 }
112
113 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
114
115 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
116 {
117 #ifdef CONFIG_SCHED_DEBUG
118 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
119 #endif
120 return container_of(rt_se, struct task_struct, rt);
121 }
122
123 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
124 {
125 return rt_rq->rq;
126 }
127
128 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
129 {
130 return rt_se->rt_rq;
131 }
132
133 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
134 {
135 struct rt_rq *rt_rq = rt_se->rt_rq;
136
137 return rt_rq->rq;
138 }
139
140 void free_rt_sched_group(struct task_group *tg)
141 {
142 int i;
143
144 if (tg->rt_se)
145 destroy_rt_bandwidth(&tg->rt_bandwidth);
146
147 for_each_possible_cpu(i) {
148 if (tg->rt_rq)
149 kfree(tg->rt_rq[i]);
150 if (tg->rt_se)
151 kfree(tg->rt_se[i]);
152 }
153
154 kfree(tg->rt_rq);
155 kfree(tg->rt_se);
156 }
157
158 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
159 struct sched_rt_entity *rt_se, int cpu,
160 struct sched_rt_entity *parent)
161 {
162 struct rq *rq = cpu_rq(cpu);
163
164 rt_rq->highest_prio.curr = MAX_RT_PRIO;
165 rt_rq->rt_nr_boosted = 0;
166 rt_rq->rq = rq;
167 rt_rq->tg = tg;
168
169 tg->rt_rq[cpu] = rt_rq;
170 tg->rt_se[cpu] = rt_se;
171
172 if (!rt_se)
173 return;
174
175 if (!parent)
176 rt_se->rt_rq = &rq->rt;
177 else
178 rt_se->rt_rq = parent->my_q;
179
180 rt_se->my_q = rt_rq;
181 rt_se->parent = parent;
182 INIT_LIST_HEAD(&rt_se->run_list);
183 }
184
185 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
186 {
187 struct rt_rq *rt_rq;
188 struct sched_rt_entity *rt_se;
189 int i;
190
191 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
192 if (!tg->rt_rq)
193 goto err;
194 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
195 if (!tg->rt_se)
196 goto err;
197
198 init_rt_bandwidth(&tg->rt_bandwidth,
199 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
200
201 for_each_possible_cpu(i) {
202 rt_rq = kzalloc_node(sizeof(struct rt_rq),
203 GFP_KERNEL, cpu_to_node(i));
204 if (!rt_rq)
205 goto err;
206
207 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
208 GFP_KERNEL, cpu_to_node(i));
209 if (!rt_se)
210 goto err_free_rq;
211
212 init_rt_rq(rt_rq);
213 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
214 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
215 }
216
217 return 1;
218
219 err_free_rq:
220 kfree(rt_rq);
221 err:
222 return 0;
223 }
224
225 #else /* CONFIG_RT_GROUP_SCHED */
226
227 #define rt_entity_is_task(rt_se) (1)
228
229 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
230 {
231 return container_of(rt_se, struct task_struct, rt);
232 }
233
234 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
235 {
236 return container_of(rt_rq, struct rq, rt);
237 }
238
239 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
240 {
241 struct task_struct *p = rt_task_of(rt_se);
242
243 return task_rq(p);
244 }
245
246 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
247 {
248 struct rq *rq = rq_of_rt_se(rt_se);
249
250 return &rq->rt;
251 }
252
253 void free_rt_sched_group(struct task_group *tg) { }
254
255 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
256 {
257 return 1;
258 }
259 #endif /* CONFIG_RT_GROUP_SCHED */
260
261 #ifdef CONFIG_SMP
262
263 static void pull_rt_task(struct rq *this_rq);
264
265 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
266 {
267 /* Try to pull RT tasks here if we lower this rq's prio */
268 return rq->rt.highest_prio.curr > prev->prio;
269 }
270
271 static inline int rt_overloaded(struct rq *rq)
272 {
273 return atomic_read(&rq->rd->rto_count);
274 }
275
276 static inline void rt_set_overload(struct rq *rq)
277 {
278 if (!rq->online)
279 return;
280
281 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
282 /*
283 * Make sure the mask is visible before we set
284 * the overload count. That is checked to determine
285 * if we should look at the mask. It would be a shame
286 * if we looked at the mask, but the mask was not
287 * updated yet.
288 *
289 * Matched by the barrier in pull_rt_task().
290 */
291 smp_wmb();
292 atomic_inc(&rq->rd->rto_count);
293 }
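/*
 * Illustrative C11-atomics analogy (not the kernel's memory model): the
 * smp_wmb()/smp_rmb() pairing above behaves like publishing the mask
 * with a release store of the count, which the reader in pull_rt_task()
 * observes with an acquire load before looking at the mask.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int rto_count;
static atomic_bool rto_mask_bit;	/* stand-in for one rto_mask bit */

static void set_overload(void)
{
	atomic_store_explicit(&rto_mask_bit, true, memory_order_relaxed);
	/* Publish the mask bit before the count becomes visible. */
	atomic_fetch_add_explicit(&rto_count, 1, memory_order_release);
}

static bool overloaded(void)
{
	/* Pairs with the release above: seeing the count implies the mask. */
	return atomic_load_explicit(&rto_count, memory_order_acquire) > 0;
}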
294
295 static inline void rt_clear_overload(struct rq *rq)
296 {
297 if (!rq->online)
298 return;
299
300 /* the order here really doesn't matter */
301 atomic_dec(&rq->rd->rto_count);
302 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
303 }
304
305 static void update_rt_migration(struct rt_rq *rt_rq)
306 {
307 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
308 if (!rt_rq->overloaded) {
309 rt_set_overload(rq_of_rt_rq(rt_rq));
310 rt_rq->overloaded = 1;
311 }
312 } else if (rt_rq->overloaded) {
313 rt_clear_overload(rq_of_rt_rq(rt_rq));
314 rt_rq->overloaded = 0;
315 }
316 }
317
318 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
319 {
320 struct task_struct *p;
321
322 if (!rt_entity_is_task(rt_se))
323 return;
324
325 p = rt_task_of(rt_se);
326 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
327
328 rt_rq->rt_nr_total++;
329 if (p->nr_cpus_allowed > 1)
330 rt_rq->rt_nr_migratory++;
331
332 update_rt_migration(rt_rq);
333 }
334
335 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
336 {
337 struct task_struct *p;
338
339 if (!rt_entity_is_task(rt_se))
340 return;
341
342 p = rt_task_of(rt_se);
343 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
344
345 rt_rq->rt_nr_total--;
346 if (p->nr_cpus_allowed > 1)
347 rt_rq->rt_nr_migratory--;
348
349 update_rt_migration(rt_rq);
350 }
351
352 static inline int has_pushable_tasks(struct rq *rq)
353 {
354 return !plist_head_empty(&rq->rt.pushable_tasks);
355 }
356
357 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
358 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
359
360 static void push_rt_tasks(struct rq *);
361 static void pull_rt_task(struct rq *);
362
363 static inline void queue_push_tasks(struct rq *rq)
364 {
365 if (!has_pushable_tasks(rq))
366 return;
367
368 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
369 }
370
371 static inline void queue_pull_task(struct rq *rq)
372 {
373 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
374 }
375
376 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
377 {
378 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
379 plist_node_init(&p->pushable_tasks, p->prio);
380 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
381
382 /* Update the highest prio pushable task */
383 if (p->prio < rq->rt.highest_prio.next)
384 rq->rt.highest_prio.next = p->prio;
385 }
386
387 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
388 {
389 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
390
391 /* Update the new highest prio pushable task */
392 if (has_pushable_tasks(rq)) {
393 p = plist_first_entry(&rq->rt.pushable_tasks,
394 struct task_struct, pushable_tasks);
395 rq->rt.highest_prio.next = p->prio;
396 } else
397 rq->rt.highest_prio.next = MAX_RT_PRIO;
398 }
399
400 #else
401
402 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
403 {
404 }
405
406 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
407 {
408 }
409
410 static inline
411 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
412 {
413 }
414
415 static inline
416 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
417 {
418 }
419
420 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
421 {
422 return false;
423 }
424
425 static inline void pull_rt_task(struct rq *this_rq)
426 {
427 }
428
429 static inline void queue_push_tasks(struct rq *rq)
430 {
431 }
432 #endif /* CONFIG_SMP */
433
434 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
435 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
436
437 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
438 {
439 return !list_empty(&rt_se->run_list);
440 }
441
442 #ifdef CONFIG_RT_GROUP_SCHED
443
444 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
445 {
446 if (!rt_rq->tg)
447 return RUNTIME_INF;
448
449 return rt_rq->rt_runtime;
450 }
451
452 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
453 {
454 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
455 }
456
457 typedef struct task_group *rt_rq_iter_t;
458
459 static inline struct task_group *next_task_group(struct task_group *tg)
460 {
461 do {
462 tg = list_entry_rcu(tg->list.next,
463 typeof(struct task_group), list);
464 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
465
466 if (&tg->list == &task_groups)
467 tg = NULL;
468
469 return tg;
470 }
471
472 #define for_each_rt_rq(rt_rq, iter, rq) \
473 for (iter = container_of(&task_groups, typeof(*iter), list); \
474 (iter = next_task_group(iter)) && \
475 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
476
477 #define for_each_sched_rt_entity(rt_se) \
478 for (; rt_se; rt_se = rt_se->parent)
479
480 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
481 {
482 return rt_se->my_q;
483 }
484
485 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
486 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
487
488 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
489 {
490 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
491 struct rq *rq = rq_of_rt_rq(rt_rq);
492 struct sched_rt_entity *rt_se;
493
494 int cpu = cpu_of(rq);
495
496 rt_se = rt_rq->tg->rt_se[cpu];
497
498 if (rt_rq->rt_nr_running) {
499 if (!rt_se)
500 enqueue_top_rt_rq(rt_rq);
501 else if (!on_rt_rq(rt_se))
502 enqueue_rt_entity(rt_se, false);
503
504 if (rt_rq->highest_prio.curr < curr->prio)
505 resched_curr(rq);
506 }
507 }
508
509 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
510 {
511 struct sched_rt_entity *rt_se;
512 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
513
514 rt_se = rt_rq->tg->rt_se[cpu];
515
516 if (!rt_se)
517 dequeue_top_rt_rq(rt_rq);
518 else if (on_rt_rq(rt_se))
519 dequeue_rt_entity(rt_se);
520 }
521
522 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
523 {
524 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
525 }
526
527 static int rt_se_boosted(struct sched_rt_entity *rt_se)
528 {
529 struct rt_rq *rt_rq = group_rt_rq(rt_se);
530 struct task_struct *p;
531
532 if (rt_rq)
533 return !!rt_rq->rt_nr_boosted;
534
535 p = rt_task_of(rt_se);
536 return p->prio != p->normal_prio;
537 }
538
539 #ifdef CONFIG_SMP
540 static inline const struct cpumask *sched_rt_period_mask(void)
541 {
542 return this_rq()->rd->span;
543 }
544 #else
545 static inline const struct cpumask *sched_rt_period_mask(void)
546 {
547 return cpu_online_mask;
548 }
549 #endif
550
551 static inline
552 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
553 {
554 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
555 }
556
557 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
558 {
559 return &rt_rq->tg->rt_bandwidth;
560 }
561
562 #else /* !CONFIG_RT_GROUP_SCHED */
563
564 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
565 {
566 return rt_rq->rt_runtime;
567 }
568
569 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
570 {
571 return ktime_to_ns(def_rt_bandwidth.rt_period);
572 }
573
574 typedef struct rt_rq *rt_rq_iter_t;
575
576 #define for_each_rt_rq(rt_rq, iter, rq) \
577 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
578
579 #define for_each_sched_rt_entity(rt_se) \
580 for (; rt_se; rt_se = NULL)
581
582 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
583 {
584 return NULL;
585 }
586
587 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
588 {
589 struct rq *rq = rq_of_rt_rq(rt_rq);
590
591 if (!rt_rq->rt_nr_running)
592 return;
593
594 enqueue_top_rt_rq(rt_rq);
595 resched_curr(rq);
596 }
597
598 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
599 {
600 dequeue_top_rt_rq(rt_rq);
601 }
602
603 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
604 {
605 return rt_rq->rt_throttled;
606 }
607
608 static inline const struct cpumask *sched_rt_period_mask(void)
609 {
610 return cpu_online_mask;
611 }
612
613 static inline
614 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
615 {
616 return &cpu_rq(cpu)->rt;
617 }
618
619 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
620 {
621 return &def_rt_bandwidth;
622 }
623
624 #endif /* CONFIG_RT_GROUP_SCHED */
625
626 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
627 {
628 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
629
630 return (hrtimer_active(&rt_b->rt_period_timer) ||
631 rt_rq->rt_time < rt_b->rt_runtime);
632 }
633
634 #ifdef CONFIG_SMP
635 /*
636 * We ran out of runtime, see if we can borrow some from our neighbours.
637 */
638 static void do_balance_runtime(struct rt_rq *rt_rq)
639 {
640 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
641 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
642 int i, weight;
643 u64 rt_period;
644
645 weight = cpumask_weight(rd->span);
646
647 raw_spin_lock(&rt_b->rt_runtime_lock);
648 rt_period = ktime_to_ns(rt_b->rt_period);
649 for_each_cpu(i, rd->span) {
650 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
651 s64 diff;
652
653 if (iter == rt_rq)
654 continue;
655
656 raw_spin_lock(&iter->rt_runtime_lock);
657 /*
658 * Either all rqs have inf runtime and there's nothing to steal
659 * or __disable_runtime() below sets a specific rq to inf to
660 * indicate it's been disabled and disallow stealing.
661 */
662 if (iter->rt_runtime == RUNTIME_INF)
663 goto next;
664
665 /*
666 * From runqueues with spare time, take 1/n part of their
667 * spare time, but no more than our period.
668 */
669 diff = iter->rt_runtime - iter->rt_time;
670 if (diff > 0) {
671 diff = div_u64((u64)diff, weight);
672 if (rt_rq->rt_runtime + diff > rt_period)
673 diff = rt_period - rt_rq->rt_runtime;
674 iter->rt_runtime -= diff;
675 rt_rq->rt_runtime += diff;
676 if (rt_rq->rt_runtime == rt_period) {
677 raw_spin_unlock(&iter->rt_runtime_lock);
678 break;
679 }
680 }
681 next:
682 raw_spin_unlock(&iter->rt_runtime_lock);
683 }
684 raw_spin_unlock(&rt_b->rt_runtime_lock);
685 }
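/*
 * Worked example (illustrative numbers): with a 1s period, 950ms of
 * runtime per CPU and a 4-CPU root domain (weight = 4), a starved
 * runqueue may take diff/4 of each idle neighbour's spare time
 * (up to 950ms/4 = 237.5ms per neighbour), clamped so its total
 * runtime never exceeds the full 1s period.
 */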
686
687 /*
688 * Ensure this RQ takes back all the runtime it lent to its neighbours.
689 */
690 static void __disable_runtime(struct rq *rq)
691 {
692 struct root_domain *rd = rq->rd;
693 rt_rq_iter_t iter;
694 struct rt_rq *rt_rq;
695
696 if (unlikely(!scheduler_running))
697 return;
698
699 for_each_rt_rq(rt_rq, iter, rq) {
700 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
701 s64 want;
702 int i;
703
704 raw_spin_lock(&rt_b->rt_runtime_lock);
705 raw_spin_lock(&rt_rq->rt_runtime_lock);
706 /*
707 * Either we're all inf and nobody needs to borrow, or we're
708 * already disabled and thus have nothing to do, or we have
709 * exactly the right amount of runtime to take out.
710 */
711 if (rt_rq->rt_runtime == RUNTIME_INF ||
712 rt_rq->rt_runtime == rt_b->rt_runtime)
713 goto balanced;
714 raw_spin_unlock(&rt_rq->rt_runtime_lock);
715
716 /*
717 * Calculate the difference between what we started out with
718 * and what we currently have; that's the amount of runtime
719 * we lent out and now have to reclaim.
720 */
721 want = rt_b->rt_runtime - rt_rq->rt_runtime;
722
723 /*
724 * Greedy reclaim, take back as much as we can.
725 */
726 for_each_cpu(i, rd->span) {
727 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
728 s64 diff;
729
730 /*
731 * Can't reclaim from ourselves or disabled runqueues.
732 */
733 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
734 continue;
735
736 raw_spin_lock(&iter->rt_runtime_lock);
737 if (want > 0) {
738 diff = min_t(s64, iter->rt_runtime, want);
739 iter->rt_runtime -= diff;
740 want -= diff;
741 } else {
742 iter->rt_runtime -= want;
743 want -= want;
744 }
745 raw_spin_unlock(&iter->rt_runtime_lock);
746
747 if (!want)
748 break;
749 }
750
751 raw_spin_lock(&rt_rq->rt_runtime_lock);
752 /*
753 * We cannot be left wanting - that would mean some runtime
754 * leaked out of the system.
755 */
756 BUG_ON(want);
757 balanced:
758 /*
759 * Disable all the borrow logic by pretending we have inf
760 * runtime - in which case borrowing doesn't make sense.
761 */
762 rt_rq->rt_runtime = RUNTIME_INF;
763 rt_rq->rt_throttled = 0;
764 raw_spin_unlock(&rt_rq->rt_runtime_lock);
765 raw_spin_unlock(&rt_b->rt_runtime_lock);
766
767 /* Make rt_rq available for pick_next_task() */
768 sched_rt_rq_enqueue(rt_rq);
769 }
770 }
771
772 static void __enable_runtime(struct rq *rq)
773 {
774 rt_rq_iter_t iter;
775 struct rt_rq *rt_rq;
776
777 if (unlikely(!scheduler_running))
778 return;
779
780 /*
781 * Reset each runqueue's bandwidth settings
782 */
783 for_each_rt_rq(rt_rq, iter, rq) {
784 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
785
786 raw_spin_lock(&rt_b->rt_runtime_lock);
787 raw_spin_lock(&rt_rq->rt_runtime_lock);
788 rt_rq->rt_runtime = rt_b->rt_runtime;
789 rt_rq->rt_time = 0;
790 rt_rq->rt_throttled = 0;
791 raw_spin_unlock(&rt_rq->rt_runtime_lock);
792 raw_spin_unlock(&rt_b->rt_runtime_lock);
793 }
794 }
795
796 static void balance_runtime(struct rt_rq *rt_rq)
797 {
798 if (!sched_feat(RT_RUNTIME_SHARE))
799 return;
800
801 if (rt_rq->rt_time > rt_rq->rt_runtime) {
802 raw_spin_unlock(&rt_rq->rt_runtime_lock);
803 do_balance_runtime(rt_rq);
804 raw_spin_lock(&rt_rq->rt_runtime_lock);
805 }
806 }
807 #else /* !CONFIG_SMP */
808 static inline void balance_runtime(struct rt_rq *rt_rq) {}
809 #endif /* CONFIG_SMP */
810
811 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
812 {
813 int i, idle = 1, throttled = 0;
814 const struct cpumask *span;
815
816 span = sched_rt_period_mask();
817 #ifdef CONFIG_RT_GROUP_SCHED
818 /*
819 * FIXME: isolated CPUs should really leave the root task group,
820 * whether they are isolcpus or were isolated via cpusets, lest
821 * the timer run on a CPU which does not service all runqueues,
822 * potentially leaving other CPUs indefinitely throttled. If
823 * isolation is really required, the user will turn the throttle
824 * off to kill the perturbations it causes anyway. Meanwhile,
825 * this maintains functionality for boot and/or troubleshooting.
826 */
827 if (rt_b == &root_task_group.rt_bandwidth)
828 span = cpu_online_mask;
829 #endif
830 for_each_cpu(i, span) {
831 int enqueue = 0;
832 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
833 struct rq *rq = rq_of_rt_rq(rt_rq);
834
835 raw_spin_lock(&rq->lock);
836 if (rt_rq->rt_time) {
837 u64 runtime;
838
839 raw_spin_lock(&rt_rq->rt_runtime_lock);
840 if (rt_rq->rt_throttled)
841 balance_runtime(rt_rq);
842 runtime = rt_rq->rt_runtime;
843 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
844 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
845 rt_rq->rt_throttled = 0;
846 enqueue = 1;
847
848 /*
849 * When we're idle and a woken (rt) task is
850 * throttled, check_preempt_curr() will set
851 * skip_update and the time between the wakeup
852 * and this unthrottle will get accounted as
853 * 'runtime'.
854 */
855 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
856 rq_clock_skip_update(rq, false);
857 }
858 if (rt_rq->rt_time || rt_rq->rt_nr_running)
859 idle = 0;
860 raw_spin_unlock(&rt_rq->rt_runtime_lock);
861 } else if (rt_rq->rt_nr_running) {
862 idle = 0;
863 if (!rt_rq_throttled(rt_rq))
864 enqueue = 1;
865 }
866 if (rt_rq->rt_throttled)
867 throttled = 1;
868
869 if (enqueue)
870 sched_rt_rq_enqueue(rt_rq);
871 raw_spin_unlock(&rq->lock);
872 }
873
874 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
875 return 1;
876
877 return idle;
878 }
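/*
 * Worked example (illustrative numbers): a throttled runqueue that
 * accrued rt_time = 2.3s against runtime = 950ms is left with
 * 2.3s - 0.95s = 1.35s after one period (overrun = 1) and stays
 * throttled; after two missed periods (overrun = 2) it drops to 0.4s,
 * which is below 950ms, so it is unthrottled and re-enqueued.
 */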
879
880 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
881 {
882 #ifdef CONFIG_RT_GROUP_SCHED
883 struct rt_rq *rt_rq = group_rt_rq(rt_se);
884
885 if (rt_rq)
886 return rt_rq->highest_prio.curr;
887 #endif
888
889 return rt_task_of(rt_se)->prio;
890 }
891
892 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
893 {
894 u64 runtime = sched_rt_runtime(rt_rq);
895
896 if (rt_rq->rt_throttled)
897 return rt_rq_throttled(rt_rq);
898
899 if (runtime >= sched_rt_period(rt_rq))
900 return 0;
901
902 balance_runtime(rt_rq);
903 runtime = sched_rt_runtime(rt_rq);
904 if (runtime == RUNTIME_INF)
905 return 0;
906
907 if (rt_rq->rt_time > runtime) {
908 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
909
910 /*
911 * Don't actually throttle groups that have no runtime assigned
912 * but accrue some time due to boosting.
913 */
914 if (likely(rt_b->rt_runtime)) {
915 rt_rq->rt_throttled = 1;
916 printk_deferred_once("sched: RT throttling activated\n");
917 } else {
918 /*
919 * In case we did anyway, make it go away,
920 * replenishment is a joke, since it will replenish us
921 * with exactly 0 ns.
922 */
923 rt_rq->rt_time = 0;
924 }
925
926 if (rt_rq_throttled(rt_rq)) {
927 sched_rt_rq_dequeue(rt_rq);
928 return 1;
929 }
930 }
931
932 return 0;
933 }
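/*
 * Userspace view (illustrative, not part of this file): with the default
 * knobs /proc/sys/kernel/sched_rt_period_us = 1000000 and
 * sched_rt_runtime_us = 950000, a spinning SCHED_FIFO task hits the
 * throttle above and is held off the CPU for the last 50ms of every
 * second. A minimal reproducer, assuming CAP_SYS_NICE:
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	for (;;)
		;	/* spin: ~5% of each second stays idle under the throttle */
}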
934
935 /*
936 * Update the current task's runtime statistics. Skip current tasks that
937 * are not in our scheduling class.
938 */
939 static void update_curr_rt(struct rq *rq)
940 {
941 struct task_struct *curr = rq->curr;
942 struct sched_rt_entity *rt_se = &curr->rt;
943 u64 delta_exec;
944
945 if (curr->sched_class != &rt_sched_class)
946 return;
947
948 /* Kick cpufreq (see the comment in linux/cpufreq.h). */
949 if (cpu_of(rq) == smp_processor_id())
950 cpufreq_trigger_update(rq_clock(rq));
951
952 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
953 if (unlikely((s64)delta_exec <= 0))
954 return;
955
956 schedstat_set(curr->se.statistics.exec_max,
957 max(curr->se.statistics.exec_max, delta_exec));
958
959 curr->se.sum_exec_runtime += delta_exec;
960 account_group_exec_runtime(curr, delta_exec);
961
962 curr->se.exec_start = rq_clock_task(rq);
963 cpuacct_charge(curr, delta_exec);
964
965 sched_rt_avg_update(rq, delta_exec);
966
967 if (!rt_bandwidth_enabled())
968 return;
969
970 for_each_sched_rt_entity(rt_se) {
971 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
972
973 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
974 raw_spin_lock(&rt_rq->rt_runtime_lock);
975 rt_rq->rt_time += delta_exec;
976 if (sched_rt_runtime_exceeded(rt_rq))
977 resched_curr(rq);
978 raw_spin_unlock(&rt_rq->rt_runtime_lock);
979 }
980 }
981 }
982
983 static void
984 dequeue_top_rt_rq(struct rt_rq *rt_rq)
985 {
986 struct rq *rq = rq_of_rt_rq(rt_rq);
987
988 BUG_ON(&rq->rt != rt_rq);
989
990 if (!rt_rq->rt_queued)
991 return;
992
993 BUG_ON(!rq->nr_running);
994
995 sub_nr_running(rq, rt_rq->rt_nr_running);
996 rt_rq->rt_queued = 0;
997 }
998
999 static void
1000 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1001 {
1002 struct rq *rq = rq_of_rt_rq(rt_rq);
1003
1004 BUG_ON(&rq->rt != rt_rq);
1005
1006 if (rt_rq->rt_queued)
1007 return;
1008 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1009 return;
1010
1011 add_nr_running(rq, rt_rq->rt_nr_running);
1012 rt_rq->rt_queued = 1;
1013 }
1014
1015 #if defined CONFIG_SMP
1016
1017 static void
1018 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1019 {
1020 struct rq *rq = rq_of_rt_rq(rt_rq);
1021
1022 #ifdef CONFIG_RT_GROUP_SCHED
1023 /*
1024 * Change rq's cpupri only if rt_rq is the top queue.
1025 */
1026 if (&rq->rt != rt_rq)
1027 return;
1028 #endif
1029 if (rq->online && prio < prev_prio)
1030 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1031 }
1032
1033 static void
1034 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1035 {
1036 struct rq *rq = rq_of_rt_rq(rt_rq);
1037
1038 #ifdef CONFIG_RT_GROUP_SCHED
1039 /*
1040 * Change rq's cpupri only if rt_rq is the top queue.
1041 */
1042 if (&rq->rt != rt_rq)
1043 return;
1044 #endif
1045 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1046 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1047 }
1048
1049 #else /* CONFIG_SMP */
1050
1051 static inline
1052 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1053 static inline
1054 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1055
1056 #endif /* CONFIG_SMP */
1057
1058 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1059 static void
1060 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1061 {
1062 int prev_prio = rt_rq->highest_prio.curr;
1063
1064 if (prio < prev_prio)
1065 rt_rq->highest_prio.curr = prio;
1066
1067 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1068 }
1069
1070 static void
1071 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1072 {
1073 int prev_prio = rt_rq->highest_prio.curr;
1074
1075 if (rt_rq->rt_nr_running) {
1076
1077 WARN_ON(prio < prev_prio);
1078
1079 /*
1080 * This may have been our highest task, and therefore
1081 * we may have some recomputation to do
1082 */
1083 if (prio == prev_prio) {
1084 struct rt_prio_array *array = &rt_rq->active;
1085
1086 rt_rq->highest_prio.curr =
1087 sched_find_first_bit(array->bitmap);
1088 }
1089
1090 } else
1091 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1092
1093 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1094 }
1095
1096 #else
1097
1098 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1099 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1100
1101 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1102
1103 #ifdef CONFIG_RT_GROUP_SCHED
1104
1105 static void
1106 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1107 {
1108 if (rt_se_boosted(rt_se))
1109 rt_rq->rt_nr_boosted++;
1110
1111 if (rt_rq->tg)
1112 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1113 }
1114
1115 static void
1116 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1117 {
1118 if (rt_se_boosted(rt_se))
1119 rt_rq->rt_nr_boosted--;
1120
1121 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1122 }
1123
1124 #else /* CONFIG_RT_GROUP_SCHED */
1125
1126 static void
1127 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1128 {
1129 start_rt_bandwidth(&def_rt_bandwidth);
1130 }
1131
1132 static inline
1133 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1134
1135 #endif /* CONFIG_RT_GROUP_SCHED */
1136
1137 static inline
1138 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1139 {
1140 struct rt_rq *group_rq = group_rt_rq(rt_se);
1141
1142 if (group_rq)
1143 return group_rq->rt_nr_running;
1144 else
1145 return 1;
1146 }
1147
1148 static inline
1149 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1150 {
1151 int prio = rt_se_prio(rt_se);
1152
1153 WARN_ON(!rt_prio(prio));
1154 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1155
1156 inc_rt_prio(rt_rq, prio);
1157 inc_rt_migration(rt_se, rt_rq);
1158 inc_rt_group(rt_se, rt_rq);
1159 }
1160
1161 static inline
1162 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1163 {
1164 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1165 WARN_ON(!rt_rq->rt_nr_running);
1166 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1167
1168 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1169 dec_rt_migration(rt_se, rt_rq);
1170 dec_rt_group(rt_se, rt_rq);
1171 }
1172
1173 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1174 {
1175 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1176 struct rt_prio_array *array = &rt_rq->active;
1177 struct rt_rq *group_rq = group_rt_rq(rt_se);
1178 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1179
1180 /*
1181 * Don't enqueue the group if it's throttled, or when empty.
1182 * The latter is a consequence of the former when a child group
1183 * gets throttled and the current group doesn't have any other
1184 * active members.
1185 */
1186 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1187 return;
1188
1189 if (head)
1190 list_add(&rt_se->run_list, queue);
1191 else
1192 list_add_tail(&rt_se->run_list, queue);
1193 __set_bit(rt_se_prio(rt_se), array->bitmap);
1194
1195 inc_rt_tasks(rt_se, rt_rq);
1196 }
1197
1198 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1199 {
1200 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1201 struct rt_prio_array *array = &rt_rq->active;
1202
1203 list_del_init(&rt_se->run_list);
1204 if (list_empty(array->queue + rt_se_prio(rt_se)))
1205 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1206
1207 dec_rt_tasks(rt_se, rt_rq);
1208 }
1209
1210 /*
1211 * Because the prio of an upper entry depends on the lower
1212 * entries, we must remove entries top-down.
1213 */
1214 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1215 {
1216 struct sched_rt_entity *back = NULL;
1217
1218 for_each_sched_rt_entity(rt_se) {
1219 rt_se->back = back;
1220 back = rt_se;
1221 }
1222
1223 dequeue_top_rt_rq(rt_rq_of_se(back));
1224
1225 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1226 if (on_rt_rq(rt_se))
1227 __dequeue_rt_entity(rt_se);
1228 }
1229 }
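/*
 * Illustrative sketch: the function above records the leaf-to-root walk
 * by threading ->back pointers, then replays it root-first. The same
 * pattern on a toy parent chain:
 */
struct toy_node { struct toy_node *parent, *back; };

static struct toy_node *reverse_chain(struct toy_node *leaf)
{
	struct toy_node *back = NULL, *n;

	for (n = leaf; n; n = n->parent) {	/* leaf -> root */
		n->back = back;
		back = n;
	}
	return back;	/* root; follow ->back to visit entries top-down */
}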
1230
1231 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1232 {
1233 struct rq *rq = rq_of_rt_se(rt_se);
1234
1235 dequeue_rt_stack(rt_se);
1236 for_each_sched_rt_entity(rt_se)
1237 __enqueue_rt_entity(rt_se, head);
1238 enqueue_top_rt_rq(&rq->rt);
1239 }
1240
1241 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1242 {
1243 struct rq *rq = rq_of_rt_se(rt_se);
1244
1245 dequeue_rt_stack(rt_se);
1246
1247 for_each_sched_rt_entity(rt_se) {
1248 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1249
1250 if (rt_rq && rt_rq->rt_nr_running)
1251 __enqueue_rt_entity(rt_se, false);
1252 }
1253 enqueue_top_rt_rq(&rq->rt);
1254 }
1255
1256 /*
1257 * Adding/removing a task to/from a priority array:
1258 */
1259 static void
1260 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1261 {
1262 struct sched_rt_entity *rt_se = &p->rt;
1263
1264 if (flags & ENQUEUE_WAKEUP)
1265 rt_se->timeout = 0;
1266
1267 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1268
1269 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1270 enqueue_pushable_task(rq, p);
1271 }
1272
1273 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1274 {
1275 struct sched_rt_entity *rt_se = &p->rt;
1276
1277 update_curr_rt(rq);
1278 dequeue_rt_entity(rt_se);
1279
1280 dequeue_pushable_task(rq, p);
1281 }
1282
1283 /*
1284 * Put task to the head or the end of the run list without the overhead of
1285 * dequeue followed by enqueue.
1286 */
1287 static void
1288 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1289 {
1290 if (on_rt_rq(rt_se)) {
1291 struct rt_prio_array *array = &rt_rq->active;
1292 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1293
1294 if (head)
1295 list_move(&rt_se->run_list, queue);
1296 else
1297 list_move_tail(&rt_se->run_list, queue);
1298 }
1299 }
1300
1301 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1302 {
1303 struct sched_rt_entity *rt_se = &p->rt;
1304 struct rt_rq *rt_rq;
1305
1306 for_each_sched_rt_entity(rt_se) {
1307 rt_rq = rt_rq_of_se(rt_se);
1308 requeue_rt_entity(rt_rq, rt_se, head);
1309 }
1310 }
1311
1312 static void yield_task_rt(struct rq *rq)
1313 {
1314 requeue_task_rt(rq, rq->curr, 0);
1315 }
1316
1317 #ifdef CONFIG_SMP
1318 static int find_lowest_rq(struct task_struct *task);
1319
1320 static int
1321 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1322 {
1323 struct task_struct *curr;
1324 struct rq *rq;
1325
1326 /* For anything but wake ups, just return the task_cpu */
1327 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1328 goto out;
1329
1330 rq = cpu_rq(cpu);
1331
1332 rcu_read_lock();
1333 curr = READ_ONCE(rq->curr); /* unlocked access */
1334
1335 /*
1336 * If the current task on @p's runqueue is an RT task, then
1337 * try to see if we can wake this RT task up on another
1338 * runqueue. Otherwise simply start this RT task
1339 * on its current runqueue.
1340 *
1341 * We want to avoid overloading runqueues. If the woken
1342 * task is a higher priority, then it will stay on this CPU
1343 * and the lower prio task should be moved to another CPU.
1344 * Even though this will probably make the lower prio task
1345 * lose its cache, we do not want to bounce a higher priority task
1346 * around just because it gave up its CPU, perhaps for a
1347 * lock?
1348 *
1349 * For equal prio tasks, we just let the scheduler sort it out.
1350 *
1351 * Otherwise, just let it ride on the affined RQ and the
1352 * post-schedule router will push the preempted task away
1353 *
1354 * This test is optimistic, if we get it wrong the load-balancer
1355 * will have to sort it out.
1356 */
1357 if (curr && unlikely(rt_task(curr)) &&
1358 (curr->nr_cpus_allowed < 2 ||
1359 curr->prio <= p->prio)) {
1360 int target = find_lowest_rq(p);
1361
1362 /*
1363 * Don't bother moving it if the destination CPU is
1364 * not running a lower priority task.
1365 */
1366 if (target != -1 &&
1367 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1368 cpu = target;
1369 }
1370 rcu_read_unlock();
1371
1372 out:
1373 return cpu;
1374 }
1375
1376 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1377 {
1378 /*
1379 * Current can't be migrated, useless to reschedule,
1380 * let's hope p can move out.
1381 */
1382 if (rq->curr->nr_cpus_allowed == 1 ||
1383 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1384 return;
1385
1386 /*
1387 * p is migratable, so let's not schedule it and
1388 * see if it is pushed or pulled somewhere else.
1389 */
1390 if (p->nr_cpus_allowed != 1
1391 && cpupri_find(&rq->rd->cpupri, p, NULL))
1392 return;
1393
1394 /*
1395 * There appears to be other cpus that can accept
1396 * current and none to run 'p', so lets reschedule
1397 * to try and push current away:
1398 */
1399 requeue_task_rt(rq, p, 1);
1400 resched_curr(rq);
1401 }
1402
1403 #endif /* CONFIG_SMP */
1404
1405 /*
1406 * Preempt the current task with a newly woken task if needed:
1407 */
1408 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1409 {
1410 if (p->prio < rq->curr->prio) {
1411 resched_curr(rq);
1412 return;
1413 }
1414
1415 #ifdef CONFIG_SMP
1416 /*
1417 * If:
1418 *
1419 * - the newly woken task is of equal priority to the current task
1420 * - the newly woken task is non-migratable while current is migratable
1421 * - current will be preempted on the next reschedule
1422 *
1423 * we should check to see if current can readily move to a different
1424 * cpu. If so, we will reschedule to allow the push logic to try
1425 * to move current somewhere else, making room for our non-migratable
1426 * task.
1427 */
1428 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1429 check_preempt_equal_prio(rq, p);
1430 #endif
1431 }
1432
1433 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1434 struct rt_rq *rt_rq)
1435 {
1436 struct rt_prio_array *array = &rt_rq->active;
1437 struct sched_rt_entity *next = NULL;
1438 struct list_head *queue;
1439 int idx;
1440
1441 idx = sched_find_first_bit(array->bitmap);
1442 BUG_ON(idx >= MAX_RT_PRIO);
1443
1444 queue = array->queue + idx;
1445 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1446
1447 return next;
1448 }
1449
1450 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1451 {
1452 struct sched_rt_entity *rt_se;
1453 struct task_struct *p;
1454 struct rt_rq *rt_rq = &rq->rt;
1455
1456 do {
1457 rt_se = pick_next_rt_entity(rq, rt_rq);
1458 BUG_ON(!rt_se);
1459 rt_rq = group_rt_rq(rt_se);
1460 } while (rt_rq);
1461
1462 p = rt_task_of(rt_se);
1463 p->se.exec_start = rq_clock_task(rq);
1464
1465 return p;
1466 }
1467
1468 static struct task_struct *
1469 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1470 {
1471 struct task_struct *p;
1472 struct rt_rq *rt_rq = &rq->rt;
1473
1474 if (need_pull_rt_task(rq, prev)) {
1475 /*
1476 * This is OK, because current is on_cpu, which avoids it being
1477 * picked for load-balance and preemption/IRQs are still
1478 * disabled avoiding further scheduler activity on it and we're
1479 * being very careful to re-start the picking loop.
1480 */
1481 lockdep_unpin_lock(&rq->lock);
1482 pull_rt_task(rq);
1483 lockdep_pin_lock(&rq->lock);
1484 /*
1485 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1486 * means a dl or stop task can slip in, in which case we need
1487 * to re-start task selection.
1488 */
1489 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1490 rq->dl.dl_nr_running))
1491 return RETRY_TASK;
1492 }
1493
1494 /*
1495 * We may dequeue prev's rt_rq in put_prev_task().
1496 * So, we update time before rt_nr_running check.
1497 */
1498 if (prev->sched_class == &rt_sched_class)
1499 update_curr_rt(rq);
1500
1501 if (!rt_rq->rt_queued)
1502 return NULL;
1503
1504 put_prev_task(rq, prev);
1505
1506 p = _pick_next_task_rt(rq);
1507
1508 /* The running task is never eligible for pushing */
1509 dequeue_pushable_task(rq, p);
1510
1511 queue_push_tasks(rq);
1512
1513 return p;
1514 }
1515
1516 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1517 {
1518 update_curr_rt(rq);
1519
1520 /*
1521 * The previous task needs to be made eligible for pushing
1522 * if it is still active
1523 */
1524 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1525 enqueue_pushable_task(rq, p);
1526 }
1527
1528 #ifdef CONFIG_SMP
1529
1530 /* Only try algorithms three times */
1531 #define RT_MAX_TRIES 3
1532
1533 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1534 {
1535 if (!task_running(rq, p) &&
1536 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1537 return 1;
1538 return 0;
1539 }
1540
1541 /*
1542 * Return the highest pushable rq's task, which is suitable to be executed
1543 * on the cpu, NULL otherwise
1544 */
1545 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1546 {
1547 struct plist_head *head = &rq->rt.pushable_tasks;
1548 struct task_struct *p;
1549
1550 if (!has_pushable_tasks(rq))
1551 return NULL;
1552
1553 plist_for_each_entry(p, head, pushable_tasks) {
1554 if (pick_rt_task(rq, p, cpu))
1555 return p;
1556 }
1557
1558 return NULL;
1559 }
1560
1561 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1562
1563 static int find_lowest_rq(struct task_struct *task)
1564 {
1565 struct sched_domain *sd;
1566 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1567 int this_cpu = smp_processor_id();
1568 int cpu = task_cpu(task);
1569
1570 /* Make sure the mask is initialized first */
1571 if (unlikely(!lowest_mask))
1572 return -1;
1573
1574 if (task->nr_cpus_allowed == 1)
1575 return -1; /* No other targets possible */
1576
1577 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1578 return -1; /* No targets found */
1579
1580 /*
1581 * At this point we have built a mask of cpus representing the
1582 * lowest priority tasks in the system. Now we want to elect
1583 * the best one based on our affinity and topology.
1584 *
1585 * We prioritize the last cpu that the task executed on since
1586 * it is most likely cache-hot in that location.
1587 */
1588 if (cpumask_test_cpu(cpu, lowest_mask))
1589 return cpu;
1590
1591 /*
1592 * Otherwise, we consult the sched_domains span maps to figure
1593 * out which cpu is logically closest to our hot cache data.
1594 */
1595 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1596 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1597
1598 rcu_read_lock();
1599 for_each_domain(cpu, sd) {
1600 if (sd->flags & SD_WAKE_AFFINE) {
1601 int best_cpu;
1602
1603 /*
1604 * "this_cpu" is cheaper to preempt than a
1605 * remote processor.
1606 */
1607 if (this_cpu != -1 &&
1608 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1609 rcu_read_unlock();
1610 return this_cpu;
1611 }
1612
1613 best_cpu = cpumask_first_and(lowest_mask,
1614 sched_domain_span(sd));
1615 if (best_cpu < nr_cpu_ids) {
1616 rcu_read_unlock();
1617 return best_cpu;
1618 }
1619 }
1620 }
1621 rcu_read_unlock();
1622
1623 /*
1624 * And finally, if there were no matches within the domains
1625 * just give the caller *something* to work with from the compatible
1626 * locations.
1627 */
1628 if (this_cpu != -1)
1629 return this_cpu;
1630
1631 cpu = cpumask_any(lowest_mask);
1632 if (cpu < nr_cpu_ids)
1633 return cpu;
1634 return -1;
1635 }
1636
1637 /* Will lock the rq it finds */
1638 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1639 {
1640 struct rq *lowest_rq = NULL;
1641 int tries;
1642 int cpu;
1643
1644 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1645 cpu = find_lowest_rq(task);
1646
1647 if ((cpu == -1) || (cpu == rq->cpu))
1648 break;
1649
1650 lowest_rq = cpu_rq(cpu);
1651
1652 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1653 /*
1654 * Target rq has tasks of equal or higher priority,
1655 * retrying does not release any lock and is unlikely
1656 * to yield a different result.
1657 */
1658 lowest_rq = NULL;
1659 break;
1660 }
1661
1662 /* if the prio of this runqueue changed, try again */
1663 if (double_lock_balance(rq, lowest_rq)) {
1664 /*
1665 * We had to unlock the run queue. In
1666 * the mean time, task could have
1667 * migrated already or had its affinity changed.
1668 * Also make sure that it wasn't scheduled on its rq.
1669 */
1670 if (unlikely(task_rq(task) != rq ||
1671 !cpumask_test_cpu(lowest_rq->cpu,
1672 tsk_cpus_allowed(task)) ||
1673 task_running(rq, task) ||
1674 !task_on_rq_queued(task))) {
1675
1676 double_unlock_balance(rq, lowest_rq);
1677 lowest_rq = NULL;
1678 break;
1679 }
1680 }
1681
1682 /* If this rq is still suitable use it. */
1683 if (lowest_rq->rt.highest_prio.curr > task->prio)
1684 break;
1685
1686 /* try again */
1687 double_unlock_balance(rq, lowest_rq);
1688 lowest_rq = NULL;
1689 }
1690
1691 return lowest_rq;
1692 }
1693
1694 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1695 {
1696 struct task_struct *p;
1697
1698 if (!has_pushable_tasks(rq))
1699 return NULL;
1700
1701 p = plist_first_entry(&rq->rt.pushable_tasks,
1702 struct task_struct, pushable_tasks);
1703
1704 BUG_ON(rq->cpu != task_cpu(p));
1705 BUG_ON(task_current(rq, p));
1706 BUG_ON(p->nr_cpus_allowed <= 1);
1707
1708 BUG_ON(!task_on_rq_queued(p));
1709 BUG_ON(!rt_task(p));
1710
1711 return p;
1712 }
1713
1714 /*
1715 * If the current CPU has more than one RT task, see if the non
1716 * running task can migrate over to a CPU that is running a task
1717 * of lesser priority.
1718 */
1719 static int push_rt_task(struct rq *rq)
1720 {
1721 struct task_struct *next_task;
1722 struct rq *lowest_rq;
1723 int ret = 0;
1724
1725 if (!rq->rt.overloaded)
1726 return 0;
1727
1728 next_task = pick_next_pushable_task(rq);
1729 if (!next_task)
1730 return 0;
1731
1732 retry:
1733 if (unlikely(next_task == rq->curr)) {
1734 WARN_ON(1);
1735 return 0;
1736 }
1737
1738 /*
1739 * It's possible that the next_task slipped in with a
1740 * higher priority than current. If that's the case
1741 * just reschedule current.
1742 */
1743 if (unlikely(next_task->prio < rq->curr->prio)) {
1744 resched_curr(rq);
1745 return 0;
1746 }
1747
1748 /* We might release rq lock */
1749 get_task_struct(next_task);
1750
1751 /* find_lock_lowest_rq locks the rq if found */
1752 lowest_rq = find_lock_lowest_rq(next_task, rq);
1753 if (!lowest_rq) {
1754 struct task_struct *task;
1755 /*
1756 * find_lock_lowest_rq releases rq->lock
1757 * so it is possible that next_task has migrated.
1758 *
1759 * We need to make sure that the task is still on the same
1760 * run-queue and is also still the next task eligible for
1761 * pushing.
1762 */
1763 task = pick_next_pushable_task(rq);
1764 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1765 /*
1766 * The task hasn't migrated, and is still the next
1767 * eligible task, but we failed to find a run-queue
1768 * to push it to. Do not retry in this case, since
1769 * other cpus will pull from us when ready.
1770 */
1771 goto out;
1772 }
1773
1774 if (!task)
1775 /* No more tasks, just exit */
1776 goto out;
1777
1778 /*
1779 * Something has shifted, try again.
1780 */
1781 put_task_struct(next_task);
1782 next_task = task;
1783 goto retry;
1784 }
1785
1786 deactivate_task(rq, next_task, 0);
1787 set_task_cpu(next_task, lowest_rq->cpu);
1788 activate_task(lowest_rq, next_task, 0);
1789 ret = 1;
1790
1791 resched_curr(lowest_rq);
1792
1793 double_unlock_balance(rq, lowest_rq);
1794
1795 out:
1796 put_task_struct(next_task);
1797
1798 return ret;
1799 }
1800
1801 static void push_rt_tasks(struct rq *rq)
1802 {
1803 /* push_rt_task will return true if it moved an RT */
1804 while (push_rt_task(rq))
1805 ;
1806 }
1807
1808 #ifdef HAVE_RT_PUSH_IPI
1809 /*
1810 * The search for the next cpu always starts at rq->cpu and ends
1811 * when we reach rq->cpu again. It will never return rq->cpu.
1812 * This returns the next cpu to check, or nr_cpu_ids if the loop
1813 * is complete.
1814 *
1815 * rq->rt.push_cpu holds the last cpu returned by this function,
1816 * or if this is the first instance, it must hold rq->cpu.
1817 */
1818 static int rto_next_cpu(struct rq *rq)
1819 {
1820 int prev_cpu = rq->rt.push_cpu;
1821 int cpu;
1822
1823 cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1824
1825 /*
1826 * If the previous cpu is less than the rq's CPU, then it already
1827 * passed the end of the mask, and has started from the beginning.
1828 * We end if the next CPU is greater or equal to rq's CPU.
1829 */
1830 if (prev_cpu < rq->cpu) {
1831 if (cpu >= rq->cpu)
1832 return nr_cpu_ids;
1833
1834 } else if (cpu >= nr_cpu_ids) {
1835 /*
1836 * We passed the end of the mask, start at the beginning.
1837 * If the result is greater or equal to the rq's CPU, then
1838 * the loop is finished.
1839 */
1840 cpu = cpumask_first(rq->rd->rto_mask);
1841 if (cpu >= rq->cpu)
1842 return nr_cpu_ids;
1843 }
1844 rq->rt.push_cpu = cpu;
1845
1846 /* Return cpu to let the caller know if the loop is finished or not */
1847 return cpu;
1848 }
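/*
 * Illustrative sketch (simplified from the wrap-around logic above): the
 * walk is a circular scan of the overload mask that starts just after
 * this rq's CPU and terminates once it comes back around to it. With a
 * plain int array standing in for rto_mask:
 */
static int next_rto_cpu(const int *mask, int ncpus, int self, int prev)
{
	int i;

	for (i = prev + 1; i <= prev + ncpus; i++) {
		int cpu = i % ncpus;

		if (cpu == self)
			return ncpus;	/* full circle: the loop is done */
		if (mask[cpu])
			return cpu;	/* next overloaded CPU to visit */
	}
	return ncpus;
}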
1849
1850 static int find_next_push_cpu(struct rq *rq)
1851 {
1852 struct rq *next_rq;
1853 int cpu;
1854
1855 while (1) {
1856 cpu = rto_next_cpu(rq);
1857 if (cpu >= nr_cpu_ids)
1858 break;
1859 next_rq = cpu_rq(cpu);
1860
1861 /* Make sure the next rq can push to this rq */
1862 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1863 break;
1864 }
1865
1866 return cpu;
1867 }
1868
1869 #define RT_PUSH_IPI_EXECUTING 1
1870 #define RT_PUSH_IPI_RESTART 2
1871
1872 static void tell_cpu_to_push(struct rq *rq)
1873 {
1874 int cpu;
1875
1876 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1877 raw_spin_lock(&rq->rt.push_lock);
1878 /* Make sure it's still executing */
1879 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1880 /*
1881 * Tell the IPI to restart the loop as things have
1882 * changed since it started.
1883 */
1884 rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1885 raw_spin_unlock(&rq->rt.push_lock);
1886 return;
1887 }
1888 raw_spin_unlock(&rq->rt.push_lock);
1889 }
1890
1891 /* When here, there's no IPI going around */
1892
1893 rq->rt.push_cpu = rq->cpu;
1894 cpu = find_next_push_cpu(rq);
1895 if (cpu >= nr_cpu_ids)
1896 return;
1897
1898 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1899
1900 irq_work_queue_on(&rq->rt.push_work, cpu);
1901 }
1902
1903 /* Called from hardirq context */
1904 static void try_to_push_tasks(void *arg)
1905 {
1906 struct rt_rq *rt_rq = arg;
1907 struct rq *rq, *src_rq;
1908 int this_cpu;
1909 int cpu;
1910
1911 this_cpu = rt_rq->push_cpu;
1912
1913 /* Paranoid check */
1914 BUG_ON(this_cpu != smp_processor_id());
1915
1916 rq = cpu_rq(this_cpu);
1917 src_rq = rq_of_rt_rq(rt_rq);
1918
1919 again:
1920 if (has_pushable_tasks(rq)) {
1921 raw_spin_lock(&rq->lock);
1922 push_rt_task(rq);
1923 raw_spin_unlock(&rq->lock);
1924 }
1925
1926 /* Pass the IPI to the next rt overloaded queue */
1927 raw_spin_lock(&rt_rq->push_lock);
1928 /*
1929 * If the source queue changed since the IPI went out,
1930 * we need to restart the search from that CPU again.
1931 */
1932 if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1933 rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1934 rt_rq->push_cpu = src_rq->cpu;
1935 }
1936
1937 cpu = find_next_push_cpu(src_rq);
1938
1939 if (cpu >= nr_cpu_ids)
1940 rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1941 raw_spin_unlock(&rt_rq->push_lock);
1942
1943 if (cpu >= nr_cpu_ids)
1944 return;
1945
1946 /*
1947 * It is possible that a restart caused this CPU to be
1948 * chosen again. Don't bother with an IPI, just see if we
1949 * have more to push.
1950 */
1951 if (unlikely(cpu == rq->cpu))
1952 goto again;
1953
1954 /* Try the next RT overloaded CPU */
1955 irq_work_queue_on(&rt_rq->push_work, cpu);
1956 }
1957
1958 static void push_irq_work_func(struct irq_work *work)
1959 {
1960 struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
1961
1962 try_to_push_tasks(rt_rq);
1963 }
1964 #endif /* HAVE_RT_PUSH_IPI */
1965
1966 static void pull_rt_task(struct rq *this_rq)
1967 {
1968 int this_cpu = this_rq->cpu, cpu;
1969 bool resched = false;
1970 struct task_struct *p;
1971 struct rq *src_rq;
1972
1973 if (likely(!rt_overloaded(this_rq)))
1974 return;
1975
1976 /*
1977 * Match the barrier from rt_set_overload(); this guarantees that if we
1978 * see overloaded we must also see the rto_mask bit.
1979 */
1980 smp_rmb();
1981
1982 #ifdef HAVE_RT_PUSH_IPI
1983 if (sched_feat(RT_PUSH_IPI)) {
1984 tell_cpu_to_push(this_rq);
1985 return;
1986 }
1987 #endif
1988
1989 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1990 if (this_cpu == cpu)
1991 continue;
1992
1993 src_rq = cpu_rq(cpu);
1994
1995 /*
1996 * Don't bother taking the src_rq->lock if the next highest
1997 * task is known to be lower-priority than our current task.
1998 * This may look racy, but if this value is about to go
1999 * logically higher, the src_rq will push this task away.
2000 * And if it's going logically lower, we do not care.
2001 */
2002 if (src_rq->rt.highest_prio.next >=
2003 this_rq->rt.highest_prio.curr)
2004 continue;
2005
2006 /*
2007 * We can potentially drop this_rq's lock in
2008 * double_lock_balance, and another CPU could
2009 * alter this_rq
2010 */
2011 double_lock_balance(this_rq, src_rq);
2012
2013 /*
2014 * We can only pull a task that is pushable
2015 * on its rq, and no others.
2016 */
2017 p = pick_highest_pushable_task(src_rq, this_cpu);
2018
2019 /*
2020 * Do we have an RT task that preempts
2021 * the to-be-scheduled task?
2022 */
2023 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2024 WARN_ON(p == src_rq->curr);
2025 WARN_ON(!task_on_rq_queued(p));
2026
2027 /*
2028 * There's a chance that p is higher in priority
2029 * than what's currently running on its cpu.
2030 * This is just that p is waking up and hasn't
2031 * had a chance to schedule. We only pull
2032 * p if it is lower in priority than the
2033 * current task on the run queue
2034 */
2035 if (p->prio < src_rq->curr->prio)
2036 goto skip;
2037
2038 resched = true;
2039
2040 deactivate_task(src_rq, p, 0);
2041 set_task_cpu(p, this_cpu);
2042 activate_task(this_rq, p, 0);
2043 /*
2044 * We continue with the search, just in
2045 * case there's an even higher prio task
2046 * in another runqueue. (low likelihood
2047 * but possible)
2048 */
2049 }
2050 skip:
2051 double_unlock_balance(this_rq, src_rq);
2052 }
2053
2054 if (resched)
2055 resched_curr(this_rq);
2056 }
2057
2058 /*
2059 * If we are not running and we are not going to reschedule soon, we should
2060 * try to push tasks away now
2061 */
2062 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2063 {
2064 if (!task_running(rq, p) &&
2065 !test_tsk_need_resched(rq->curr) &&
2066 p->nr_cpus_allowed > 1 &&
2067 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2068 (rq->curr->nr_cpus_allowed < 2 ||
2069 rq->curr->prio <= p->prio))
2070 push_rt_tasks(rq);
2071 }
2072
2073 /* Assumes rq->lock is held */
2074 static void rq_online_rt(struct rq *rq)
2075 {
2076 if (rq->rt.overloaded)
2077 rt_set_overload(rq);
2078
2079 __enable_runtime(rq);
2080
2081 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2082 }
2083
2084 /* Assumes rq->lock is held */
2085 static void rq_offline_rt(struct rq *rq)
2086 {
2087 if (rq->rt.overloaded)
2088 rt_clear_overload(rq);
2089
2090 __disable_runtime(rq);
2091
2092 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2093 }
2094
2095 /*
2096 * When switching from the rt queue, we bring ourselves to a position
2097 * that we might want to pull RT tasks from other runqueues.
2098 */
2099 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2100 {
2101 /*
2102 * If there are other RT tasks then we will reschedule
2103 * and the scheduling of the other RT tasks will handle
2104 * the balancing. But if we are the last RT task
2105 * we may need to handle the pulling of RT tasks
2106 * now.
2107 */
2108 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2109 return;
2110
2111 queue_pull_task(rq);
2112 }
2113
2114 void __init init_sched_rt_class(void)
2115 {
2116 unsigned int i;
2117
2118 for_each_possible_cpu(i) {
2119 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2120 GFP_KERNEL, cpu_to_node(i));
2121 }
2122 }
2123 #endif /* CONFIG_SMP */
2124
2125 /*
2126 * When switching a task to RT, we may overload the runqueue
2127 * with RT tasks. In this case we try to push them off to
2128 * other runqueues.
2129 */
2130 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2131 {
2132 /*
2133 * If we are already running, then there's nothing
2134 * that needs to be done. But if we are not running
2135 * we may need to preempt the current running task.
2136 * If that current running task is also an RT task
2137 * then see if we can move to another run queue.
2138 */
2139 if (task_on_rq_queued(p) && rq->curr != p) {
2140 #ifdef CONFIG_SMP
2141 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2142 queue_push_tasks(rq);
2143 #else
2144 if (p->prio < rq->curr->prio)
2145 resched_curr(rq);
2146 #endif /* CONFIG_SMP */
2147 }
2148 }
2149
2150 /*
2151 * Priority of the task has changed. This may cause
2152 * us to initiate a push or pull.
2153 */
2154 static void
2155 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2156 {
2157 if (!task_on_rq_queued(p))
2158 return;
2159
2160 if (rq->curr == p) {
2161 #ifdef CONFIG_SMP
2162 /*
2163 * If our priority decreases while running, we
2164 * may need to pull tasks to this runqueue.
2165 */
2166 if (oldprio < p->prio)
2167 queue_pull_task(rq);
2168
2169 /*
2170 * If there's a higher priority task waiting to run
2171 * then reschedule.
2172 */
2173 if (p->prio > rq->rt.highest_prio.curr)
2174 resched_curr(rq);
2175 #else
2176 /* For UP simply resched on drop of prio */
2177 if (oldprio < p->prio)
2178 resched_curr(rq);
2179 #endif /* CONFIG_SMP */
2180 } else {
2181 /*
2182 * This task is not running, but if its priority is
2183 * higher than that of the current running task
2184 * then reschedule.
2185 */
2186 if (p->prio < rq->curr->prio)
2187 resched_curr(rq);
2188 }
2189 }
2190
2191 static void watchdog(struct rq *rq, struct task_struct *p)
2192 {
2193 unsigned long soft, hard;
2194
2195 /* max may change after cur was read, this will be fixed next tick */
2196 soft = task_rlimit(p, RLIMIT_RTTIME);
2197 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2198
2199 if (soft != RLIM_INFINITY) {
2200 unsigned long next;
2201
2202 if (p->rt.watchdog_stamp != jiffies) {
2203 p->rt.timeout++;
2204 p->rt.watchdog_stamp = jiffies;
2205 }
2206
2207 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2208 if (p->rt.timeout > next)
2209 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2210 }
2211 }
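/*
 * Worked example (illustrative numbers): with HZ = 1000 one tick is
 * USEC_PER_SEC/HZ = 1000us, so an RLIMIT_RTTIME soft limit of 2000000us
 * converts to next = DIV_ROUND_UP(2000000, 1000) = 2000 ticks; once
 * p->rt.timeout exceeds that, the CPU-time expiry is armed so the
 * rlimit signal is delivered on a subsequent posix-cpu-timers check.
 */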
2212
2213 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2214 {
2215 struct sched_rt_entity *rt_se = &p->rt;
2216
2217 update_curr_rt(rq);
2218
2219 watchdog(rq, p);
2220
2221 /*
2222 * RR tasks need a special form of timeslice management.
2223 * FIFO tasks have no timeslices.
2224 */
2225 if (p->policy != SCHED_RR)
2226 return;
2227
2228 if (--p->rt.time_slice)
2229 return;
2230
2231 p->rt.time_slice = sched_rr_timeslice;
2232
2233 /*
2234 * Requeue to the end of queue if we (and all of our ancestors) are not
2235 * the only element on the queue
2236 */
2237 for_each_sched_rt_entity(rt_se) {
2238 if (rt_se->run_list.prev != rt_se->run_list.next) {
2239 requeue_task_rt(rq, p, 0);
2240 resched_curr(rq);
2241 return;
2242 }
2243 }
2244 }
2245
2246 static void set_curr_task_rt(struct rq *rq)
2247 {
2248 struct task_struct *p = rq->curr;
2249
2250 p->se.exec_start = rq_clock_task(rq);
2251
2252 /* The running task is never eligible for pushing */
2253 dequeue_pushable_task(rq, p);
2254 }
2255
2256 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2257 {
2258 /*
2259 * Time slice is 0 for SCHED_FIFO tasks
2260 */
2261 if (task->policy == SCHED_RR)
2262 return sched_rr_timeslice;
2263 else
2264 return 0;
2265 }
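/*
 * Userspace view (illustrative): this hook backs sched_rr_get_interval(2),
 * so SCHED_RR tasks see the round-robin timeslice while SCHED_FIFO tasks
 * see zero. A minimal query:
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts))
		return 1;
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}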
2266
2267 const struct sched_class rt_sched_class = {
2268 .next = &fair_sched_class,
2269 .enqueue_task = enqueue_task_rt,
2270 .dequeue_task = dequeue_task_rt,
2271 .yield_task = yield_task_rt,
2272
2273 .check_preempt_curr = check_preempt_curr_rt,
2274
2275 .pick_next_task = pick_next_task_rt,
2276 .put_prev_task = put_prev_task_rt,
2277
2278 #ifdef CONFIG_SMP
2279 .select_task_rq = select_task_rq_rt,
2280
2281 .set_cpus_allowed = set_cpus_allowed_common,
2282 .rq_online = rq_online_rt,
2283 .rq_offline = rq_offline_rt,
2284 .task_woken = task_woken_rt,
2285 .switched_from = switched_from_rt,
2286 #endif
2287
2288 .set_curr_task = set_curr_task_rt,
2289 .task_tick = task_tick_rt,
2290
2291 .get_rr_interval = get_rr_interval_rt,
2292
2293 .prio_changed = prio_changed_rt,
2294 .switched_to = switched_to_rt,
2295
2296 .update_curr = update_curr_rt,
2297 };
2298
2299 #ifdef CONFIG_SCHED_DEBUG
2300 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2301
2302 void print_rt_stats(struct seq_file *m, int cpu)
2303 {
2304 rt_rq_iter_t iter;
2305 struct rt_rq *rt_rq;
2306
2307 rcu_read_lock();
2308 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2309 print_rt_rq(m, cpu, rt_rq);
2310 rcu_read_unlock();
2311 }
2312 #endif /* CONFIG_SCHED_DEBUG */