kernel/sched/rt.c
1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
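/*
 * Illustrative sketch (editorial addition, not part of rt.c): one minimal way
 * a task ends up in this scheduling class from userspace, using only the
 * standard POSIX sched_setscheduler() interface.  The priority value 10 is an
 * arbitrary example; valid RT priorities are 1..99.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* SCHED_FIFO works the same way; both policies are served by this class. */
	if (sched_setscheduler(0, SCHED_RR, &sp) == -1) {
		perror("sched_setscheduler");	/* typically needs CAP_SYS_NICE */
		return 1;
	}
	printf("now running under SCHED_RR, priority %d\n", sp.sched_priority);
	return 0;
}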
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9 #include <linux/irq_work.h>
10
11 int sched_rr_timeslice = RR_TIMESLICE;
12
13 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
14
15 struct rt_bandwidth def_rt_bandwidth;
16
17 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
18 {
19 struct rt_bandwidth *rt_b =
20 container_of(timer, struct rt_bandwidth, rt_period_timer);
21 int idle = 0;
22 int overrun;
23
24 raw_spin_lock(&rt_b->rt_runtime_lock);
25 for (;;) {
26 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
27 if (!overrun)
28 break;
29
30 raw_spin_unlock(&rt_b->rt_runtime_lock);
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 raw_spin_lock(&rt_b->rt_runtime_lock);
33 }
34 if (idle)
35 rt_b->rt_period_active = 0;
36 raw_spin_unlock(&rt_b->rt_runtime_lock);
37
38 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
39 }
40
41 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
42 {
43 rt_b->rt_period = ns_to_ktime(period);
44 rt_b->rt_runtime = runtime;
45
46 raw_spin_lock_init(&rt_b->rt_runtime_lock);
47
48 hrtimer_init(&rt_b->rt_period_timer,
49 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
50 rt_b->rt_period_timer.function = sched_rt_period_timer;
51 }
52
53 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
54 {
55 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
56 return;
57
58 raw_spin_lock(&rt_b->rt_runtime_lock);
59 if (!rt_b->rt_period_active) {
60 rt_b->rt_period_active = 1;
61 /*
62 * SCHED_DEADLINE updates the bandwidth, as a runaway
63 * RT task with a DL task could hog a CPU. But DL does
64 * not reset the period. If a deadline task was running
65 * without an RT task running, it can cause RT tasks to
66 * throttle when they start up. Kick the timer right away
67 * to update the period.
68 */
69 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
70 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
71 }
72 raw_spin_unlock(&rt_b->rt_runtime_lock);
73 }
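/*
 * Illustrative sketch (editorial addition): the global period/runtime pair
 * that feeds def_rt_bandwidth is exposed to userspace as
 * /proc/sys/kernel/sched_rt_period_us and /proc/sys/kernel/sched_rt_runtime_us
 * (commonly 1000000 and 950000 by default).  A small reader, assuming only
 * those two proc files exist:
 */
#include <stdio.h>

static long read_long(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	long period = read_long("/proc/sys/kernel/sched_rt_period_us");
	long runtime = read_long("/proc/sys/kernel/sched_rt_runtime_us");

	if (period <= 0)
		return 1;

	if (runtime < 0)	/* -1 means RUNTIME_INF: RT throttling disabled */
		printf("RT throttling disabled\n");
	else
		printf("RT tasks may use %.1f%% of each %ld us period\n",
		       100.0 * runtime / period, period);
	return 0;
}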
74
75 #if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
76 static void push_irq_work_func(struct irq_work *work);
77 #endif
78
79 void init_rt_rq(struct rt_rq *rt_rq)
80 {
81 struct rt_prio_array *array;
82 int i;
83
84 array = &rt_rq->active;
85 for (i = 0; i < MAX_RT_PRIO; i++) {
86 INIT_LIST_HEAD(array->queue + i);
87 __clear_bit(i, array->bitmap);
88 }
89 /* delimiter for bitsearch: */
90 __set_bit(MAX_RT_PRIO, array->bitmap);
91
92 #if defined CONFIG_SMP
93 rt_rq->highest_prio.curr = MAX_RT_PRIO;
94 rt_rq->highest_prio.next = MAX_RT_PRIO;
95 rt_rq->rt_nr_migratory = 0;
96 rt_rq->overloaded = 0;
97 plist_head_init(&rt_rq->pushable_tasks);
98
99 #ifdef HAVE_RT_PUSH_IPI
100 rt_rq->push_flags = 0;
101 rt_rq->push_cpu = nr_cpu_ids;
102 raw_spin_lock_init(&rt_rq->push_lock);
103 init_irq_work(&rt_rq->push_work, push_irq_work_func);
104 #endif
105 #endif /* CONFIG_SMP */
106 /* We start in dequeued state, because no RT tasks are queued */
107 rt_rq->rt_queued = 0;
108
109 rt_rq->rt_time = 0;
110 rt_rq->rt_throttled = 0;
111 rt_rq->rt_runtime = 0;
112 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
113 }
114
115 #ifdef CONFIG_RT_GROUP_SCHED
116 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
117 {
118 hrtimer_cancel(&rt_b->rt_period_timer);
119 }
120
121 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
122
123 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
124 {
125 #ifdef CONFIG_SCHED_DEBUG
126 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
127 #endif
128 return container_of(rt_se, struct task_struct, rt);
129 }
130
131 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
132 {
133 return rt_rq->rq;
134 }
135
136 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
137 {
138 return rt_se->rt_rq;
139 }
140
141 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
142 {
143 struct rt_rq *rt_rq = rt_se->rt_rq;
144
145 return rt_rq->rq;
146 }
147
148 void free_rt_sched_group(struct task_group *tg)
149 {
150 int i;
151
152 if (tg->rt_se)
153 destroy_rt_bandwidth(&tg->rt_bandwidth);
154
155 for_each_possible_cpu(i) {
156 if (tg->rt_rq)
157 kfree(tg->rt_rq[i]);
158 if (tg->rt_se)
159 kfree(tg->rt_se[i]);
160 }
161
162 kfree(tg->rt_rq);
163 kfree(tg->rt_se);
164 }
165
166 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
167 struct sched_rt_entity *rt_se, int cpu,
168 struct sched_rt_entity *parent)
169 {
170 struct rq *rq = cpu_rq(cpu);
171
172 rt_rq->highest_prio.curr = MAX_RT_PRIO;
173 rt_rq->rt_nr_boosted = 0;
174 rt_rq->rq = rq;
175 rt_rq->tg = tg;
176
177 tg->rt_rq[cpu] = rt_rq;
178 tg->rt_se[cpu] = rt_se;
179
180 if (!rt_se)
181 return;
182
183 if (!parent)
184 rt_se->rt_rq = &rq->rt;
185 else
186 rt_se->rt_rq = parent->my_q;
187
188 rt_se->my_q = rt_rq;
189 rt_se->parent = parent;
190 INIT_LIST_HEAD(&rt_se->run_list);
191 }
192
193 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
194 {
195 struct rt_rq *rt_rq;
196 struct sched_rt_entity *rt_se;
197 int i;
198
199 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
200 if (!tg->rt_rq)
201 goto err;
202 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
203 if (!tg->rt_se)
204 goto err;
205
206 init_rt_bandwidth(&tg->rt_bandwidth,
207 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
208
209 for_each_possible_cpu(i) {
210 rt_rq = kzalloc_node(sizeof(struct rt_rq),
211 GFP_KERNEL, cpu_to_node(i));
212 if (!rt_rq)
213 goto err;
214
215 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
216 GFP_KERNEL, cpu_to_node(i));
217 if (!rt_se)
218 goto err_free_rq;
219
220 init_rt_rq(rt_rq);
221 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
222 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
223 }
224
225 return 1;
226
227 err_free_rq:
228 kfree(rt_rq);
229 err:
230 return 0;
231 }
232
233 #else /* CONFIG_RT_GROUP_SCHED */
234
235 #define rt_entity_is_task(rt_se) (1)
236
237 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
238 {
239 return container_of(rt_se, struct task_struct, rt);
240 }
241
242 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
243 {
244 return container_of(rt_rq, struct rq, rt);
245 }
246
247 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
248 {
249 struct task_struct *p = rt_task_of(rt_se);
250
251 return task_rq(p);
252 }
253
254 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
255 {
256 struct rq *rq = rq_of_rt_se(rt_se);
257
258 return &rq->rt;
259 }
260
261 void free_rt_sched_group(struct task_group *tg) { }
262
263 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
264 {
265 return 1;
266 }
267 #endif /* CONFIG_RT_GROUP_SCHED */
268
269 #ifdef CONFIG_SMP
270
271 static void pull_rt_task(struct rq *this_rq);
272
273 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
274 {
275 /* Try to pull RT tasks here if we lower this rq's prio */
276 return rq->rt.highest_prio.curr > prev->prio;
277 }
278
279 static inline int rt_overloaded(struct rq *rq)
280 {
281 return atomic_read(&rq->rd->rto_count);
282 }
283
284 static inline void rt_set_overload(struct rq *rq)
285 {
286 if (!rq->online)
287 return;
288
289 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
290 /*
291 * Make sure the mask is visible before we set
292 * the overload count. That is checked to determine
293 * if we should look at the mask. It would be a shame
294 * if we looked at the mask, but the mask was not
295 * updated yet.
296 *
297 * Matched by the barrier in pull_rt_task().
298 */
299 smp_wmb();
300 atomic_inc(&rq->rd->rto_count);
301 }
302
303 static inline void rt_clear_overload(struct rq *rq)
304 {
305 if (!rq->online)
306 return;
307
308 /* the order here really doesn't matter */
309 atomic_dec(&rq->rd->rto_count);
310 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
311 }
312
313 static void update_rt_migration(struct rt_rq *rt_rq)
314 {
315 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
316 if (!rt_rq->overloaded) {
317 rt_set_overload(rq_of_rt_rq(rt_rq));
318 rt_rq->overloaded = 1;
319 }
320 } else if (rt_rq->overloaded) {
321 rt_clear_overload(rq_of_rt_rq(rt_rq));
322 rt_rq->overloaded = 0;
323 }
324 }
325
326 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
327 {
328 struct task_struct *p;
329
330 if (!rt_entity_is_task(rt_se))
331 return;
332
333 p = rt_task_of(rt_se);
334 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
335
336 rt_rq->rt_nr_total++;
337 if (p->nr_cpus_allowed > 1)
338 rt_rq->rt_nr_migratory++;
339
340 update_rt_migration(rt_rq);
341 }
342
343 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
344 {
345 struct task_struct *p;
346
347 if (!rt_entity_is_task(rt_se))
348 return;
349
350 p = rt_task_of(rt_se);
351 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
352
353 rt_rq->rt_nr_total--;
354 if (p->nr_cpus_allowed > 1)
355 rt_rq->rt_nr_migratory--;
356
357 update_rt_migration(rt_rq);
358 }
359
360 static inline int has_pushable_tasks(struct rq *rq)
361 {
362 return !plist_head_empty(&rq->rt.pushable_tasks);
363 }
364
365 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
366 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
367
368 static void push_rt_tasks(struct rq *);
369 static void pull_rt_task(struct rq *);
370
371 static inline void queue_push_tasks(struct rq *rq)
372 {
373 if (!has_pushable_tasks(rq))
374 return;
375
376 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
377 }
378
379 static inline void queue_pull_task(struct rq *rq)
380 {
381 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
382 }
383
384 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
385 {
386 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
387 plist_node_init(&p->pushable_tasks, p->prio);
388 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
389
390 /* Update the highest prio pushable task */
391 if (p->prio < rq->rt.highest_prio.next)
392 rq->rt.highest_prio.next = p->prio;
393 }
394
395 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
396 {
397 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
398
399 /* Update the new highest prio pushable task */
400 if (has_pushable_tasks(rq)) {
401 p = plist_first_entry(&rq->rt.pushable_tasks,
402 struct task_struct, pushable_tasks);
403 rq->rt.highest_prio.next = p->prio;
404 } else
405 rq->rt.highest_prio.next = MAX_RT_PRIO;
406 }
407
408 #else
409
410 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
411 {
412 }
413
414 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
415 {
416 }
417
418 static inline
419 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
420 {
421 }
422
423 static inline
424 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
425 {
426 }
427
428 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
429 {
430 return false;
431 }
432
433 static inline void pull_rt_task(struct rq *this_rq)
434 {
435 }
436
437 static inline void queue_push_tasks(struct rq *rq)
438 {
439 }
440 #endif /* CONFIG_SMP */
441
442 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
443 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
444
445 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
446 {
447 return rt_se->on_rq;
448 }
449
450 #ifdef CONFIG_RT_GROUP_SCHED
451
452 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
453 {
454 if (!rt_rq->tg)
455 return RUNTIME_INF;
456
457 return rt_rq->rt_runtime;
458 }
459
460 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
461 {
462 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
463 }
464
465 typedef struct task_group *rt_rq_iter_t;
466
467 static inline struct task_group *next_task_group(struct task_group *tg)
468 {
469 do {
470 tg = list_entry_rcu(tg->list.next,
471 typeof(struct task_group), list);
472 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
473
474 if (&tg->list == &task_groups)
475 tg = NULL;
476
477 return tg;
478 }
479
480 #define for_each_rt_rq(rt_rq, iter, rq) \
481 for (iter = container_of(&task_groups, typeof(*iter), list); \
482 (iter = next_task_group(iter)) && \
483 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
484
485 #define for_each_sched_rt_entity(rt_se) \
486 for (; rt_se; rt_se = rt_se->parent)
487
488 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
489 {
490 return rt_se->my_q;
491 }
492
493 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
494 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
495
496 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
497 {
498 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
499 struct rq *rq = rq_of_rt_rq(rt_rq);
500 struct sched_rt_entity *rt_se;
501
502 int cpu = cpu_of(rq);
503
504 rt_se = rt_rq->tg->rt_se[cpu];
505
506 if (rt_rq->rt_nr_running) {
507 if (!rt_se)
508 enqueue_top_rt_rq(rt_rq);
509 else if (!on_rt_rq(rt_se))
510 enqueue_rt_entity(rt_se, 0);
511
512 if (rt_rq->highest_prio.curr < curr->prio)
513 resched_curr(rq);
514 }
515 }
516
517 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
518 {
519 struct sched_rt_entity *rt_se;
520 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
521
522 rt_se = rt_rq->tg->rt_se[cpu];
523
524 if (!rt_se)
525 dequeue_top_rt_rq(rt_rq);
526 else if (on_rt_rq(rt_se))
527 dequeue_rt_entity(rt_se, 0);
528 }
529
530 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
531 {
532 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
533 }
534
535 static int rt_se_boosted(struct sched_rt_entity *rt_se)
536 {
537 struct rt_rq *rt_rq = group_rt_rq(rt_se);
538 struct task_struct *p;
539
540 if (rt_rq)
541 return !!rt_rq->rt_nr_boosted;
542
543 p = rt_task_of(rt_se);
544 return p->prio != p->normal_prio;
545 }
546
547 #ifdef CONFIG_SMP
548 static inline const struct cpumask *sched_rt_period_mask(void)
549 {
550 return this_rq()->rd->span;
551 }
552 #else
553 static inline const struct cpumask *sched_rt_period_mask(void)
554 {
555 return cpu_online_mask;
556 }
557 #endif
558
559 static inline
560 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
561 {
562 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
563 }
564
565 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
566 {
567 return &rt_rq->tg->rt_bandwidth;
568 }
569
570 #else /* !CONFIG_RT_GROUP_SCHED */
571
572 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
573 {
574 return rt_rq->rt_runtime;
575 }
576
577 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
578 {
579 return ktime_to_ns(def_rt_bandwidth.rt_period);
580 }
581
582 typedef struct rt_rq *rt_rq_iter_t;
583
584 #define for_each_rt_rq(rt_rq, iter, rq) \
585 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
586
587 #define for_each_sched_rt_entity(rt_se) \
588 for (; rt_se; rt_se = NULL)
589
590 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
591 {
592 return NULL;
593 }
594
595 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
596 {
597 struct rq *rq = rq_of_rt_rq(rt_rq);
598
599 if (!rt_rq->rt_nr_running)
600 return;
601
602 enqueue_top_rt_rq(rt_rq);
603 resched_curr(rq);
604 }
605
606 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
607 {
608 dequeue_top_rt_rq(rt_rq);
609 }
610
611 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
612 {
613 return rt_rq->rt_throttled;
614 }
615
616 static inline const struct cpumask *sched_rt_period_mask(void)
617 {
618 return cpu_online_mask;
619 }
620
621 static inline
622 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
623 {
624 return &cpu_rq(cpu)->rt;
625 }
626
627 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
628 {
629 return &def_rt_bandwidth;
630 }
631
632 #endif /* CONFIG_RT_GROUP_SCHED */
633
634 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
635 {
636 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
637
638 return (hrtimer_active(&rt_b->rt_period_timer) ||
639 rt_rq->rt_time < rt_b->rt_runtime);
640 }
641
642 #ifdef CONFIG_SMP
643 /*
644 * We ran out of runtime, see if we can borrow some from our neighbours.
645 */
646 static void do_balance_runtime(struct rt_rq *rt_rq)
647 {
648 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
649 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
650 int i, weight;
651 u64 rt_period;
652
653 weight = cpumask_weight(rd->span);
654
655 raw_spin_lock(&rt_b->rt_runtime_lock);
656 rt_period = ktime_to_ns(rt_b->rt_period);
657 for_each_cpu(i, rd->span) {
658 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
659 s64 diff;
660
661 if (iter == rt_rq)
662 continue;
663
664 raw_spin_lock(&iter->rt_runtime_lock);
665 /*
666 * Either all rqs have inf runtime and there's nothing to steal
667 * or __disable_runtime() below sets a specific rq to inf to
668 * indicate it's been disabled and disallow stealing.
669 */
670 if (iter->rt_runtime == RUNTIME_INF)
671 goto next;
672
673 /*
674 * From runqueues with spare time, take 1/n part of their
675 * spare time, but no more than our period.
676 */
677 diff = iter->rt_runtime - iter->rt_time;
678 if (diff > 0) {
679 diff = div_u64((u64)diff, weight);
680 if (rt_rq->rt_runtime + diff > rt_period)
681 diff = rt_period - rt_rq->rt_runtime;
682 iter->rt_runtime -= diff;
683 rt_rq->rt_runtime += diff;
684 if (rt_rq->rt_runtime == rt_period) {
685 raw_spin_unlock(&iter->rt_runtime_lock);
686 break;
687 }
688 }
689 next:
690 raw_spin_unlock(&iter->rt_runtime_lock);
691 }
692 raw_spin_unlock(&rt_b->rt_runtime_lock);
693 }
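/*
 * Editorial model of the borrowing arithmetic above, with made-up numbers.
 * This is not kernel code; it only mirrors the "take 1/n of a neighbour's
 * spare time, clamped so we never exceed one full period" step.
 */
#include <stdio.h>

int main(void)
{
	/* all values in nanoseconds, mirroring rt_runtime/rt_time above */
	long long rt_period  = 1000000000;	/* 1 s period */
	long long my_runtime =  950000000;	/* what this rq already has */
	long long my_need    =  980000000;	/* rt_time it has consumed */
	long long iter_spare =  600000000;	/* neighbour's runtime - rt_time */
	int weight = 4;				/* CPUs spanned by the root domain */

	if (my_need > my_runtime && iter_spare > 0) {
		long long diff = iter_spare / weight;	/* take 1/n of the spare */

		if (my_runtime + diff > rt_period)	/* but never beyond one period */
			diff = rt_period - my_runtime;
		my_runtime += diff;
		printf("borrowed %lld ns, runtime is now %lld ns\n", diff, my_runtime);
	}
	return 0;
}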
694
695 /*
696 * Ensure this RQ takes back all the runtime it lent to its neighbours.
697 */
698 static void __disable_runtime(struct rq *rq)
699 {
700 struct root_domain *rd = rq->rd;
701 rt_rq_iter_t iter;
702 struct rt_rq *rt_rq;
703
704 if (unlikely(!scheduler_running))
705 return;
706
707 for_each_rt_rq(rt_rq, iter, rq) {
708 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
709 s64 want;
710 int i;
711
712 raw_spin_lock(&rt_b->rt_runtime_lock);
713 raw_spin_lock(&rt_rq->rt_runtime_lock);
714 /*
715 * Either we're all inf and nobody needs to borrow, or we're
716 * already disabled and thus have nothing to do, or we have
717 * exactly the right amount of runtime to take out.
718 */
719 if (rt_rq->rt_runtime == RUNTIME_INF ||
720 rt_rq->rt_runtime == rt_b->rt_runtime)
721 goto balanced;
722 raw_spin_unlock(&rt_rq->rt_runtime_lock);
723
724 /*
725 * Calculate the difference between what we started out with
726 * and what we currently have; that's the amount of runtime
727 * we lent out and now have to reclaim.
728 */
729 want = rt_b->rt_runtime - rt_rq->rt_runtime;
730
731 /*
732 * Greedy reclaim, take back as much as we can.
733 */
734 for_each_cpu(i, rd->span) {
735 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
736 s64 diff;
737
738 /*
739 * Can't reclaim from ourselves or disabled runqueues.
740 */
741 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
742 continue;
743
744 raw_spin_lock(&iter->rt_runtime_lock);
745 if (want > 0) {
746 diff = min_t(s64, iter->rt_runtime, want);
747 iter->rt_runtime -= diff;
748 want -= diff;
749 } else {
750 iter->rt_runtime -= want;
751 want -= want;
752 }
753 raw_spin_unlock(&iter->rt_runtime_lock);
754
755 if (!want)
756 break;
757 }
758
759 raw_spin_lock(&rt_rq->rt_runtime_lock);
760 /*
761 * We cannot be left wanting - that would mean some runtime
762 * leaked out of the system.
763 */
764 BUG_ON(want);
765 balanced:
766 /*
767 * Disable all the borrow logic by pretending we have inf
768 * runtime - in which case borrowing doesn't make sense.
769 */
770 rt_rq->rt_runtime = RUNTIME_INF;
771 rt_rq->rt_throttled = 0;
772 raw_spin_unlock(&rt_rq->rt_runtime_lock);
773 raw_spin_unlock(&rt_b->rt_runtime_lock);
774
775 /* Make rt_rq available for pick_next_task() */
776 sched_rt_rq_enqueue(rt_rq);
777 }
778 }
779
780 static void __enable_runtime(struct rq *rq)
781 {
782 rt_rq_iter_t iter;
783 struct rt_rq *rt_rq;
784
785 if (unlikely(!scheduler_running))
786 return;
787
788 /*
789 * Reset each runqueue's bandwidth settings
790 */
791 for_each_rt_rq(rt_rq, iter, rq) {
792 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
793
794 raw_spin_lock(&rt_b->rt_runtime_lock);
795 raw_spin_lock(&rt_rq->rt_runtime_lock);
796 rt_rq->rt_runtime = rt_b->rt_runtime;
797 rt_rq->rt_time = 0;
798 rt_rq->rt_throttled = 0;
799 raw_spin_unlock(&rt_rq->rt_runtime_lock);
800 raw_spin_unlock(&rt_b->rt_runtime_lock);
801 }
802 }
803
804 static void balance_runtime(struct rt_rq *rt_rq)
805 {
806 if (!sched_feat(RT_RUNTIME_SHARE))
807 return;
808
809 if (rt_rq->rt_time > rt_rq->rt_runtime) {
810 raw_spin_unlock(&rt_rq->rt_runtime_lock);
811 do_balance_runtime(rt_rq);
812 raw_spin_lock(&rt_rq->rt_runtime_lock);
813 }
814 }
815 #else /* !CONFIG_SMP */
816 static inline void balance_runtime(struct rt_rq *rt_rq) {}
817 #endif /* CONFIG_SMP */
818
819 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
820 {
821 int i, idle = 1, throttled = 0;
822 const struct cpumask *span;
823
824 span = sched_rt_period_mask();
825 #ifdef CONFIG_RT_GROUP_SCHED
826 /*
827 * FIXME: isolated CPUs should really leave the root task group,
828 * whether they are isolcpus or were isolated via cpusets, lest
829 * the timer run on a CPU which does not service all runqueues,
830 * potentially leaving other CPUs indefinitely throttled. If
831 * isolation is really required, the user will turn the throttle
832 * off to kill the perturbations it causes anyway. Meanwhile,
833 * this maintains functionality for boot and/or troubleshooting.
834 */
835 if (rt_b == &root_task_group.rt_bandwidth)
836 span = cpu_online_mask;
837 #endif
838 for_each_cpu(i, span) {
839 int enqueue = 0;
840 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
841 struct rq *rq = rq_of_rt_rq(rt_rq);
842
843 raw_spin_lock(&rq->lock);
844 if (rt_rq->rt_time) {
845 u64 runtime;
846
847 raw_spin_lock(&rt_rq->rt_runtime_lock);
848 if (rt_rq->rt_throttled)
849 balance_runtime(rt_rq);
850 runtime = rt_rq->rt_runtime;
851 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
852 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
853 rt_rq->rt_throttled = 0;
854 enqueue = 1;
855
856 /*
857 * When we're idle and a woken (rt) task is
858 * throttled, check_preempt_curr() will set
859 * skip_update and the time between the wakeup
860 * and this unthrottle will get accounted as
861 * 'runtime'.
862 */
863 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
864 rq_clock_skip_update(rq, false);
865 }
866 if (rt_rq->rt_time || rt_rq->rt_nr_running)
867 idle = 0;
868 raw_spin_unlock(&rt_rq->rt_runtime_lock);
869 } else if (rt_rq->rt_nr_running) {
870 idle = 0;
871 if (!rt_rq_throttled(rt_rq))
872 enqueue = 1;
873 }
874 if (rt_rq->rt_throttled)
875 throttled = 1;
876
877 if (enqueue)
878 sched_rt_rq_enqueue(rt_rq);
879 raw_spin_unlock(&rq->lock);
880 }
881
882 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
883 return 1;
884
885 return idle;
886 }
887
888 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
889 {
890 #ifdef CONFIG_RT_GROUP_SCHED
891 struct rt_rq *rt_rq = group_rt_rq(rt_se);
892
893 if (rt_rq)
894 return rt_rq->highest_prio.curr;
895 #endif
896
897 return rt_task_of(rt_se)->prio;
898 }
899
900 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
901 {
902 u64 runtime = sched_rt_runtime(rt_rq);
903
904 if (rt_rq->rt_throttled)
905 return rt_rq_throttled(rt_rq);
906
907 if (runtime >= sched_rt_period(rt_rq))
908 return 0;
909
910 balance_runtime(rt_rq);
911 runtime = sched_rt_runtime(rt_rq);
912 if (runtime == RUNTIME_INF)
913 return 0;
914
915 if (rt_rq->rt_time > runtime) {
916 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
917
918 /*
919 * Don't actually throttle groups that have no runtime assigned
920 * but accrue some time due to boosting.
921 */
922 if (likely(rt_b->rt_runtime)) {
923 rt_rq->rt_throttled = 1;
924 printk_deferred_once("sched: RT throttling activated\n");
925 } else {
926 /*
927 * In case we did anyway, make it go away,
928 * replenishment is a joke, since it will replenish us
929 * with exactly 0 ns.
930 */
931 rt_rq->rt_time = 0;
932 }
933
934 if (rt_rq_throttled(rt_rq)) {
935 sched_rt_rq_dequeue(rt_rq);
936 return 1;
937 }
938 }
939
940 return 0;
941 }
942
943 /*
944 * Update the current task's runtime statistics. Skip current tasks that
945 * are not in our scheduling class.
946 */
947 static void update_curr_rt(struct rq *rq)
948 {
949 struct task_struct *curr = rq->curr;
950 struct sched_rt_entity *rt_se = &curr->rt;
951 u64 delta_exec;
952
953 if (curr->sched_class != &rt_sched_class)
954 return;
955
956 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
957 if (unlikely((s64)delta_exec <= 0))
958 return;
959
960 schedstat_set(curr->se.statistics.exec_max,
961 max(curr->se.statistics.exec_max, delta_exec));
962
963 curr->se.sum_exec_runtime += delta_exec;
964 account_group_exec_runtime(curr, delta_exec);
965
966 curr->se.exec_start = rq_clock_task(rq);
967 cpuacct_charge(curr, delta_exec);
968
969 sched_rt_avg_update(rq, delta_exec);
970
971 if (!rt_bandwidth_enabled())
972 return;
973
974 for_each_sched_rt_entity(rt_se) {
975 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
976
977 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
978 raw_spin_lock(&rt_rq->rt_runtime_lock);
979 rt_rq->rt_time += delta_exec;
980 if (sched_rt_runtime_exceeded(rt_rq))
981 resched_curr(rq);
982 raw_spin_unlock(&rt_rq->rt_runtime_lock);
983 }
984 }
985 }
986
987 static void
988 dequeue_top_rt_rq(struct rt_rq *rt_rq)
989 {
990 struct rq *rq = rq_of_rt_rq(rt_rq);
991
992 BUG_ON(&rq->rt != rt_rq);
993
994 if (!rt_rq->rt_queued)
995 return;
996
997 BUG_ON(!rq->nr_running);
998
999 sub_nr_running(rq, rt_rq->rt_nr_running);
1000 rt_rq->rt_queued = 0;
1001 }
1002
1003 static void
1004 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1005 {
1006 struct rq *rq = rq_of_rt_rq(rt_rq);
1007
1008 BUG_ON(&rq->rt != rt_rq);
1009
1010 if (rt_rq->rt_queued)
1011 return;
1012 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1013 return;
1014
1015 add_nr_running(rq, rt_rq->rt_nr_running);
1016 rt_rq->rt_queued = 1;
1017 }
1018
1019 #if defined CONFIG_SMP
1020
1021 static void
1022 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1023 {
1024 struct rq *rq = rq_of_rt_rq(rt_rq);
1025
1026 #ifdef CONFIG_RT_GROUP_SCHED
1027 /*
1028 * Change rq's cpupri only if rt_rq is the top queue.
1029 */
1030 if (&rq->rt != rt_rq)
1031 return;
1032 #endif
1033 if (rq->online && prio < prev_prio)
1034 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1035 }
1036
1037 static void
1038 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1039 {
1040 struct rq *rq = rq_of_rt_rq(rt_rq);
1041
1042 #ifdef CONFIG_RT_GROUP_SCHED
1043 /*
1044 * Change rq's cpupri only if rt_rq is the top queue.
1045 */
1046 if (&rq->rt != rt_rq)
1047 return;
1048 #endif
1049 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1050 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1051 }
1052
1053 #else /* CONFIG_SMP */
1054
1055 static inline
1056 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1057 static inline
1058 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1059
1060 #endif /* CONFIG_SMP */
1061
1062 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1063 static void
1064 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1065 {
1066 int prev_prio = rt_rq->highest_prio.curr;
1067
1068 if (prio < prev_prio)
1069 rt_rq->highest_prio.curr = prio;
1070
1071 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1072 }
1073
1074 static void
1075 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1076 {
1077 int prev_prio = rt_rq->highest_prio.curr;
1078
1079 if (rt_rq->rt_nr_running) {
1080
1081 WARN_ON(prio < prev_prio);
1082
1083 /*
1084 * This may have been our highest task, and therefore
1085 * we may have some recomputation to do
1086 */
1087 if (prio == prev_prio) {
1088 struct rt_prio_array *array = &rt_rq->active;
1089
1090 rt_rq->highest_prio.curr =
1091 sched_find_first_bit(array->bitmap);
1092 }
1093
1094 } else
1095 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1096
1097 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1098 }
1099
1100 #else
1101
1102 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1103 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1104
1105 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1106
1107 #ifdef CONFIG_RT_GROUP_SCHED
1108
1109 static void
1110 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1111 {
1112 if (rt_se_boosted(rt_se))
1113 rt_rq->rt_nr_boosted++;
1114
1115 if (rt_rq->tg)
1116 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1117 }
1118
1119 static void
1120 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1121 {
1122 if (rt_se_boosted(rt_se))
1123 rt_rq->rt_nr_boosted--;
1124
1125 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1126 }
1127
1128 #else /* CONFIG_RT_GROUP_SCHED */
1129
1130 static void
1131 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1132 {
1133 start_rt_bandwidth(&def_rt_bandwidth);
1134 }
1135
1136 static inline
1137 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1138
1139 #endif /* CONFIG_RT_GROUP_SCHED */
1140
1141 static inline
1142 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1143 {
1144 struct rt_rq *group_rq = group_rt_rq(rt_se);
1145
1146 if (group_rq)
1147 return group_rq->rt_nr_running;
1148 else
1149 return 1;
1150 }
1151
1152 static inline
1153 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1154 {
1155 struct rt_rq *group_rq = group_rt_rq(rt_se);
1156 struct task_struct *tsk;
1157
1158 if (group_rq)
1159 return group_rq->rr_nr_running;
1160
1161 tsk = rt_task_of(rt_se);
1162
1163 return (tsk->policy == SCHED_RR) ? 1 : 0;
1164 }
1165
1166 static inline
1167 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1168 {
1169 int prio = rt_se_prio(rt_se);
1170
1171 WARN_ON(!rt_prio(prio));
1172 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1173 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1174
1175 inc_rt_prio(rt_rq, prio);
1176 inc_rt_migration(rt_se, rt_rq);
1177 inc_rt_group(rt_se, rt_rq);
1178 }
1179
1180 static inline
1181 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1182 {
1183 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1184 WARN_ON(!rt_rq->rt_nr_running);
1185 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1186 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1187
1188 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1189 dec_rt_migration(rt_se, rt_rq);
1190 dec_rt_group(rt_se, rt_rq);
1191 }
1192
1193 /*
1194 * Change rt_se->run_list location unless SAVE && !MOVE
1195 *
1196 * assumes ENQUEUE/DEQUEUE flags match
1197 */
1198 static inline bool move_entity(unsigned int flags)
1199 {
1200 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1201 return false;
1202
1203 return true;
1204 }
1205
1206 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1207 {
1208 list_del_init(&rt_se->run_list);
1209
1210 if (list_empty(array->queue + rt_se_prio(rt_se)))
1211 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1212
1213 rt_se->on_list = 0;
1214 }
1215
1216 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1217 {
1218 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1219 struct rt_prio_array *array = &rt_rq->active;
1220 struct rt_rq *group_rq = group_rt_rq(rt_se);
1221 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1222
1223 /*
1224 * Don't enqueue the group if it's throttled, or when empty.
1225 * The latter is a consequence of the former when a child group
1226 * gets throttled and the current group doesn't have any other
1227 * active members.
1228 */
1229 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1230 if (rt_se->on_list)
1231 __delist_rt_entity(rt_se, array);
1232 return;
1233 }
1234
1235 if (move_entity(flags)) {
1236 WARN_ON_ONCE(rt_se->on_list);
1237 if (flags & ENQUEUE_HEAD)
1238 list_add(&rt_se->run_list, queue);
1239 else
1240 list_add_tail(&rt_se->run_list, queue);
1241
1242 __set_bit(rt_se_prio(rt_se), array->bitmap);
1243 rt_se->on_list = 1;
1244 }
1245 rt_se->on_rq = 1;
1246
1247 inc_rt_tasks(rt_se, rt_rq);
1248 }
1249
1250 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1251 {
1252 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1253 struct rt_prio_array *array = &rt_rq->active;
1254
1255 if (move_entity(flags)) {
1256 WARN_ON_ONCE(!rt_se->on_list);
1257 __delist_rt_entity(rt_se, array);
1258 }
1259 rt_se->on_rq = 0;
1260
1261 dec_rt_tasks(rt_se, rt_rq);
1262 }
1263
1264 /*
1265 * Because the prio of an upper entry depends on the lower
1266 * entries, we must remove entries top - down.
1267 */
1268 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1269 {
1270 struct sched_rt_entity *back = NULL;
1271
1272 for_each_sched_rt_entity(rt_se) {
1273 rt_se->back = back;
1274 back = rt_se;
1275 }
1276
1277 dequeue_top_rt_rq(rt_rq_of_se(back));
1278
1279 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1280 if (on_rt_rq(rt_se))
1281 __dequeue_rt_entity(rt_se, flags);
1282 }
1283 }
1284
1285 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1286 {
1287 struct rq *rq = rq_of_rt_se(rt_se);
1288
1289 dequeue_rt_stack(rt_se, flags);
1290 for_each_sched_rt_entity(rt_se)
1291 __enqueue_rt_entity(rt_se, flags);
1292 enqueue_top_rt_rq(&rq->rt);
1293 }
1294
1295 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1296 {
1297 struct rq *rq = rq_of_rt_se(rt_se);
1298
1299 dequeue_rt_stack(rt_se, flags);
1300
1301 for_each_sched_rt_entity(rt_se) {
1302 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1303
1304 if (rt_rq && rt_rq->rt_nr_running)
1305 __enqueue_rt_entity(rt_se, flags);
1306 }
1307 enqueue_top_rt_rq(&rq->rt);
1308 }
1309
1310 /*
1311 * Adding/removing a task to/from a priority array:
1312 */
1313 static void
1314 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1315 {
1316 struct sched_rt_entity *rt_se = &p->rt;
1317
1318 if (flags & ENQUEUE_WAKEUP)
1319 rt_se->timeout = 0;
1320
1321 enqueue_rt_entity(rt_se, flags);
1322
1323 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1324 enqueue_pushable_task(rq, p);
1325 }
1326
1327 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1328 {
1329 struct sched_rt_entity *rt_se = &p->rt;
1330
1331 update_curr_rt(rq);
1332 dequeue_rt_entity(rt_se, flags);
1333
1334 dequeue_pushable_task(rq, p);
1335 }
1336
1337 /*
1338 * Put task to the head or the end of the run list without the overhead of
1339 * dequeue followed by enqueue.
1340 */
1341 static void
1342 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1343 {
1344 if (on_rt_rq(rt_se)) {
1345 struct rt_prio_array *array = &rt_rq->active;
1346 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1347
1348 if (head)
1349 list_move(&rt_se->run_list, queue);
1350 else
1351 list_move_tail(&rt_se->run_list, queue);
1352 }
1353 }
1354
1355 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1356 {
1357 struct sched_rt_entity *rt_se = &p->rt;
1358 struct rt_rq *rt_rq;
1359
1360 for_each_sched_rt_entity(rt_se) {
1361 rt_rq = rt_rq_of_se(rt_se);
1362 requeue_rt_entity(rt_rq, rt_se, head);
1363 }
1364 }
1365
1366 static void yield_task_rt(struct rq *rq)
1367 {
1368 requeue_task_rt(rq, rq->curr, 0);
1369 }
1370
1371 #ifdef CONFIG_SMP
1372 static int find_lowest_rq(struct task_struct *task);
1373
1374 static int
1375 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1376 {
1377 struct task_struct *curr;
1378 struct rq *rq;
1379
1380 /* For anything but wake ups, just return the task_cpu */
1381 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1382 goto out;
1383
1384 rq = cpu_rq(cpu);
1385
1386 rcu_read_lock();
1387 curr = READ_ONCE(rq->curr); /* unlocked access */
1388
1389 /*
1390 * If the current task on @p's runqueue is an RT task, then
1391 * try to see if we can wake this RT task up on another
1392 * runqueue. Otherwise simply start this RT task
1393 * on its current runqueue.
1394 *
1395 * We want to avoid overloading runqueues. If the woken
1396 * task is a higher priority, then it will stay on this CPU
1397 * and the lower prio task should be moved to another CPU.
1398 * Even though this will probably make the lower prio task
1399 * lose its cache, we do not want to bounce a higher priority task
1400 * around just because it gave up its CPU, perhaps for a
1401 * lock?
1402 *
1403 * For equal prio tasks, we just let the scheduler sort it out.
1404 *
1405 * Otherwise, just let it ride on the affined RQ and the
1406 * post-schedule router will push the preempted task away
1407 *
1408 * This test is optimistic, if we get it wrong the load-balancer
1409 * will have to sort it out.
1410 */
1411 if (curr && unlikely(rt_task(curr)) &&
1412 (curr->nr_cpus_allowed < 2 ||
1413 curr->prio <= p->prio)) {
1414 int target = find_lowest_rq(p);
1415
1416 /*
1417 * Don't bother moving it if the destination CPU is
1418 * not running a lower priority task.
1419 */
1420 if (target != -1 &&
1421 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1422 cpu = target;
1423 }
1424 rcu_read_unlock();
1425
1426 out:
1427 return cpu;
1428 }
1429
1430 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1431 {
1432 /*
1433 * Current can't be migrated, useless to reschedule,
1434 * let's hope p can move out.
1435 */
1436 if (rq->curr->nr_cpus_allowed == 1 ||
1437 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1438 return;
1439
1440 /*
1441 * p is migratable, so let's not schedule it and
1442 * see if it is pushed or pulled somewhere else.
1443 */
1444 if (p->nr_cpus_allowed != 1
1445 && cpupri_find(&rq->rd->cpupri, p, NULL))
1446 return;
1447
1448 /*
1449 * There appears to be other cpus that can accept
1450 * current and none to run 'p', so lets reschedule
1451 * to try and push current away:
1452 */
1453 requeue_task_rt(rq, p, 1);
1454 resched_curr(rq);
1455 }
1456
1457 #endif /* CONFIG_SMP */
1458
1459 /*
1460 * Preempt the current task with a newly woken task if needed:
1461 */
1462 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1463 {
1464 if (p->prio < rq->curr->prio) {
1465 resched_curr(rq);
1466 return;
1467 }
1468
1469 #ifdef CONFIG_SMP
1470 /*
1471 * If:
1472 *
1473 * - the newly woken task is of equal priority to the current task
1474 * - the newly woken task is non-migratable while current is migratable
1475 * - current will be preempted on the next reschedule
1476 *
1477 * we should check to see if current can readily move to a different
1478 * cpu. If so, we will reschedule to allow the push logic to try
1479 * to move current somewhere else, making room for our non-migratable
1480 * task.
1481 */
1482 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1483 check_preempt_equal_prio(rq, p);
1484 #endif
1485 }
1486
1487 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1488 struct rt_rq *rt_rq)
1489 {
1490 struct rt_prio_array *array = &rt_rq->active;
1491 struct sched_rt_entity *next = NULL;
1492 struct list_head *queue;
1493 int idx;
1494
1495 idx = sched_find_first_bit(array->bitmap);
1496 BUG_ON(idx >= MAX_RT_PRIO);
1497
1498 queue = array->queue + idx;
1499 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1500
1501 return next;
1502 }
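/*
 * Editorial model of the bitmap search that sched_find_first_bit() performs
 * over the rt_prio_array, including the delimiter bit that init_rt_rq() sets
 * at MAX_RT_PRIO.  A byte per bit is used here purely for clarity; the real
 * code uses a packed bitmap.
 */
#include <stdio.h>
#include <string.h>

#define MAX_RT_PRIO	100
#define NBITS		(MAX_RT_PRIO + 1)	/* +1 for the delimiter bit */

int main(void)
{
	unsigned char bitmap[NBITS];
	int i;

	memset(bitmap, 0, sizeof(bitmap));
	bitmap[MAX_RT_PRIO] = 1;	/* delimiter, always set */
	bitmap[10] = 1;			/* pretend prio 10 and prio 40 queues are non-empty */
	bitmap[40] = 1;

	for (i = 0; i < NBITS; i++) {	/* sched_find_first_bit(), in spirit */
		if (bitmap[i])
			break;
	}
	printf("next entity comes from the prio %d queue\n", i);	/* prints 10 */
	return 0;
}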
1503
1504 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1505 {
1506 struct sched_rt_entity *rt_se;
1507 struct task_struct *p;
1508 struct rt_rq *rt_rq = &rq->rt;
1509
1510 do {
1511 rt_se = pick_next_rt_entity(rq, rt_rq);
1512 BUG_ON(!rt_se);
1513 rt_rq = group_rt_rq(rt_se);
1514 } while (rt_rq);
1515
1516 p = rt_task_of(rt_se);
1517 p->se.exec_start = rq_clock_task(rq);
1518
1519 return p;
1520 }
1521
1522 static struct task_struct *
1523 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1524 {
1525 struct task_struct *p;
1526 struct rt_rq *rt_rq = &rq->rt;
1527
1528 if (need_pull_rt_task(rq, prev)) {
1529 /*
1530 * This is OK, because current is on_cpu, which avoids it being
1531 * picked for load-balance and preemption/IRQs are still
1532 * disabled avoiding further scheduler activity on it and we're
1533 * being very careful to re-start the picking loop.
1534 */
1535 lockdep_unpin_lock(&rq->lock);
1536 pull_rt_task(rq);
1537 lockdep_pin_lock(&rq->lock);
1538 /*
1539 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1540 * means a dl or stop task can slip in, in which case we need
1541 * to re-start task selection.
1542 */
1543 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1544 rq->dl.dl_nr_running))
1545 return RETRY_TASK;
1546 }
1547
1548 /*
1549 * We may dequeue prev's rt_rq in put_prev_task().
1550 * So, we update time before rt_nr_running check.
1551 */
1552 if (prev->sched_class == &rt_sched_class)
1553 update_curr_rt(rq);
1554
1555 if (!rt_rq->rt_queued)
1556 return NULL;
1557
1558 put_prev_task(rq, prev);
1559
1560 p = _pick_next_task_rt(rq);
1561
1562 /* The running task is never eligible for pushing */
1563 dequeue_pushable_task(rq, p);
1564
1565 queue_push_tasks(rq);
1566
1567 return p;
1568 }
1569
1570 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1571 {
1572 update_curr_rt(rq);
1573
1574 /*
1575 * The previous task needs to be made eligible for pushing
1576 * if it is still active
1577 */
1578 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1579 enqueue_pushable_task(rq, p);
1580 }
1581
1582 #ifdef CONFIG_SMP
1583
1584 /* Only try algorithms three times */
1585 #define RT_MAX_TRIES 3
1586
1587 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1588 {
1589 if (!task_running(rq, p) &&
1590 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1591 return 1;
1592 return 0;
1593 }
1594
1595 /*
1596 * Return the highest pushable rq's task, which is suitable to be executed
1597 * on the cpu, NULL otherwise
1598 */
1599 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1600 {
1601 struct plist_head *head = &rq->rt.pushable_tasks;
1602 struct task_struct *p;
1603
1604 if (!has_pushable_tasks(rq))
1605 return NULL;
1606
1607 plist_for_each_entry(p, head, pushable_tasks) {
1608 if (pick_rt_task(rq, p, cpu))
1609 return p;
1610 }
1611
1612 return NULL;
1613 }
1614
1615 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1616
1617 static int find_lowest_rq(struct task_struct *task)
1618 {
1619 struct sched_domain *sd;
1620 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1621 int this_cpu = smp_processor_id();
1622 int cpu = task_cpu(task);
1623
1624 /* Make sure the mask is initialized first */
1625 if (unlikely(!lowest_mask))
1626 return -1;
1627
1628 if (task->nr_cpus_allowed == 1)
1629 return -1; /* No other targets possible */
1630
1631 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1632 return -1; /* No targets found */
1633
1634 /*
1635 * At this point we have built a mask of cpus representing the
1636 * lowest priority tasks in the system. Now we want to elect
1637 * the best one based on our affinity and topology.
1638 *
1639 * We prioritize the last cpu that the task executed on since
1640 * it is most likely cache-hot in that location.
1641 */
1642 if (cpumask_test_cpu(cpu, lowest_mask))
1643 return cpu;
1644
1645 /*
1646 * Otherwise, we consult the sched_domains span maps to figure
1647 * out which cpu is logically closest to our hot cache data.
1648 */
1649 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1650 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1651
1652 rcu_read_lock();
1653 for_each_domain(cpu, sd) {
1654 if (sd->flags & SD_WAKE_AFFINE) {
1655 int best_cpu;
1656
1657 /*
1658 * "this_cpu" is cheaper to preempt than a
1659 * remote processor.
1660 */
1661 if (this_cpu != -1 &&
1662 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1663 rcu_read_unlock();
1664 return this_cpu;
1665 }
1666
1667 best_cpu = cpumask_first_and(lowest_mask,
1668 sched_domain_span(sd));
1669 if (best_cpu < nr_cpu_ids) {
1670 rcu_read_unlock();
1671 return best_cpu;
1672 }
1673 }
1674 }
1675 rcu_read_unlock();
1676
1677 /*
1678 * And finally, if there were no matches within the domains
1679 * just give the caller *something* to work with from the compatible
1680 * locations.
1681 */
1682 if (this_cpu != -1)
1683 return this_cpu;
1684
1685 cpu = cpumask_any(lowest_mask);
1686 if (cpu < nr_cpu_ids)
1687 return cpu;
1688 return -1;
1689 }
1690
1691 /* Will lock the rq it finds */
1692 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1693 {
1694 struct rq *lowest_rq = NULL;
1695 int tries;
1696 int cpu;
1697
1698 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1699 cpu = find_lowest_rq(task);
1700
1701 if ((cpu == -1) || (cpu == rq->cpu))
1702 break;
1703
1704 lowest_rq = cpu_rq(cpu);
1705
1706 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1707 /*
1708 * Target rq has tasks of equal or higher priority,
1709 * retrying does not release any lock and is unlikely
1710 * to yield a different result.
1711 */
1712 lowest_rq = NULL;
1713 break;
1714 }
1715
1716 /* if the prio of this runqueue changed, try again */
1717 if (double_lock_balance(rq, lowest_rq)) {
1718 /*
1719 * We had to unlock the run queue. In
1720 * the mean time, task could have
1721 * migrated already or had its affinity changed.
1722 * Also make sure that it wasn't scheduled on its rq.
1723 */
1724 if (unlikely(task_rq(task) != rq ||
1725 !cpumask_test_cpu(lowest_rq->cpu,
1726 tsk_cpus_allowed(task)) ||
1727 task_running(rq, task) ||
1728 !task_on_rq_queued(task))) {
1729
1730 double_unlock_balance(rq, lowest_rq);
1731 lowest_rq = NULL;
1732 break;
1733 }
1734 }
1735
1736 /* If this rq is still suitable use it. */
1737 if (lowest_rq->rt.highest_prio.curr > task->prio)
1738 break;
1739
1740 /* try again */
1741 double_unlock_balance(rq, lowest_rq);
1742 lowest_rq = NULL;
1743 }
1744
1745 return lowest_rq;
1746 }
1747
1748 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1749 {
1750 struct task_struct *p;
1751
1752 if (!has_pushable_tasks(rq))
1753 return NULL;
1754
1755 p = plist_first_entry(&rq->rt.pushable_tasks,
1756 struct task_struct, pushable_tasks);
1757
1758 BUG_ON(rq->cpu != task_cpu(p));
1759 BUG_ON(task_current(rq, p));
1760 BUG_ON(p->nr_cpus_allowed <= 1);
1761
1762 BUG_ON(!task_on_rq_queued(p));
1763 BUG_ON(!rt_task(p));
1764
1765 return p;
1766 }
1767
1768 /*
1769 * If the current CPU has more than one RT task, see if the non
1770 * running task can migrate over to a CPU that is running a task
1771 * of lesser priority.
1772 */
1773 static int push_rt_task(struct rq *rq)
1774 {
1775 struct task_struct *next_task;
1776 struct rq *lowest_rq;
1777 int ret = 0;
1778
1779 if (!rq->rt.overloaded)
1780 return 0;
1781
1782 next_task = pick_next_pushable_task(rq);
1783 if (!next_task)
1784 return 0;
1785
1786 retry:
1787 if (unlikely(next_task == rq->curr)) {
1788 WARN_ON(1);
1789 return 0;
1790 }
1791
1792 /*
1793 * It's possible that the next_task slipped in with a
1794 * higher priority than current. If that's the case,
1795 * just reschedule current.
1796 */
1797 if (unlikely(next_task->prio < rq->curr->prio)) {
1798 resched_curr(rq);
1799 return 0;
1800 }
1801
1802 /* We might release rq lock */
1803 get_task_struct(next_task);
1804
1805 /* find_lock_lowest_rq locks the rq if found */
1806 lowest_rq = find_lock_lowest_rq(next_task, rq);
1807 if (!lowest_rq) {
1808 struct task_struct *task;
1809 /*
1810 * find_lock_lowest_rq releases rq->lock
1811 * so it is possible that next_task has migrated.
1812 *
1813 * We need to make sure that the task is still on the same
1814 * run-queue and is also still the next task eligible for
1815 * pushing.
1816 */
1817 task = pick_next_pushable_task(rq);
1818 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1819 /*
1820 * The task hasn't migrated, and is still the next
1821 * eligible task, but we failed to find a run-queue
1822 * to push it to. Do not retry in this case, since
1823 * other cpus will pull from us when ready.
1824 */
1825 goto out;
1826 }
1827
1828 if (!task)
1829 /* No more tasks, just exit */
1830 goto out;
1831
1832 /*
1833 * Something has shifted, try again.
1834 */
1835 put_task_struct(next_task);
1836 next_task = task;
1837 goto retry;
1838 }
1839
1840 deactivate_task(rq, next_task, 0);
1841 set_task_cpu(next_task, lowest_rq->cpu);
1842 activate_task(lowest_rq, next_task, 0);
1843 ret = 1;
1844
1845 resched_curr(lowest_rq);
1846
1847 double_unlock_balance(rq, lowest_rq);
1848
1849 out:
1850 put_task_struct(next_task);
1851
1852 return ret;
1853 }
1854
1855 static void push_rt_tasks(struct rq *rq)
1856 {
1857 /* push_rt_task() will return true if it moved an RT task */
1858 while (push_rt_task(rq))
1859 ;
1860 }
1861
1862 #ifdef HAVE_RT_PUSH_IPI
1863 /*
1864 * The search for the next cpu always starts at rq->cpu and ends
1865 * when we reach rq->cpu again. It will never return rq->cpu.
1866 * This returns the next cpu to check, or nr_cpu_ids if the loop
1867 * is complete.
1868 *
1869 * rq->rt.push_cpu holds the last cpu returned by this function,
1870 * or if this is the first instance, it must hold rq->cpu.
1871 */
1872 static int rto_next_cpu(struct rq *rq)
1873 {
1874 int prev_cpu = rq->rt.push_cpu;
1875 int cpu;
1876
1877 cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1878
1879 /*
1880 * If the previous cpu is less than the rq's CPU, then it already
1881 * passed the end of the mask, and has started from the beginning.
1882 * We end if the next CPU is greater or equal to rq's CPU.
1883 */
1884 if (prev_cpu < rq->cpu) {
1885 if (cpu >= rq->cpu)
1886 return nr_cpu_ids;
1887
1888 } else if (cpu >= nr_cpu_ids) {
1889 /*
1890 * We passed the end of the mask, start at the beginning.
1891 * If the result is greater or equal to the rq's CPU, then
1892 * the loop is finished.
1893 */
1894 cpu = cpumask_first(rq->rd->rto_mask);
1895 if (cpu >= rq->cpu)
1896 return nr_cpu_ids;
1897 }
1898 rq->rt.push_cpu = cpu;
1899
1900 /* Return cpu to let the caller know if the loop is finished or not */
1901 return cpu;
1902 }
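/*
 * Editorial model of the circular walk above: start just after the source
 * CPU, visit every RT-overloaded CPU once, and stop when the ring wraps back
 * to the source.  The real code additionally filters on priorities in
 * find_next_push_cpu(); this sketch only shows the wrap-around ordering, and
 * the rto[] contents are made up.
 */
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	int rto[NR_CPUS] = { 0, 1, 0, 0, 1, 0, 1, 0 };	/* pretend CPUs 1, 4, 6 are overloaded */
	int this_cpu = 4;				/* rq->cpu of the source queue */
	int cpu = this_cpu;

	for (;;) {
		cpu = (cpu + 1) % NR_CPUS;	/* wrap like cpumask_next() + cpumask_first() */
		if (cpu == this_cpu)		/* walked the whole ring: loop is done */
			break;
		if (rto[cpu])
			printf("would consider pushing to CPU %d\n", cpu);	/* 6, then 1 */
	}
	return 0;
}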
1903
1904 static int find_next_push_cpu(struct rq *rq)
1905 {
1906 struct rq *next_rq;
1907 int cpu;
1908
1909 while (1) {
1910 cpu = rto_next_cpu(rq);
1911 if (cpu >= nr_cpu_ids)
1912 break;
1913 next_rq = cpu_rq(cpu);
1914
1915 /* Make sure the next rq can push to this rq */
1916 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1917 break;
1918 }
1919
1920 return cpu;
1921 }
1922
1923 #define RT_PUSH_IPI_EXECUTING 1
1924 #define RT_PUSH_IPI_RESTART 2
1925
1926 static void tell_cpu_to_push(struct rq *rq)
1927 {
1928 int cpu;
1929
1930 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1931 raw_spin_lock(&rq->rt.push_lock);
1932 /* Make sure it's still executing */
1933 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1934 /*
1935 * Tell the IPI to restart the loop as things have
1936 * changed since it started.
1937 */
1938 rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1939 raw_spin_unlock(&rq->rt.push_lock);
1940 return;
1941 }
1942 raw_spin_unlock(&rq->rt.push_lock);
1943 }
1944
1945 /* When here, there's no IPI going around */
1946
1947 rq->rt.push_cpu = rq->cpu;
1948 cpu = find_next_push_cpu(rq);
1949 if (cpu >= nr_cpu_ids)
1950 return;
1951
1952 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1953
1954 irq_work_queue_on(&rq->rt.push_work, cpu);
1955 }
1956
1957 /* Called from hardirq context */
1958 static void try_to_push_tasks(void *arg)
1959 {
1960 struct rt_rq *rt_rq = arg;
1961 struct rq *rq, *src_rq;
1962 int this_cpu;
1963 int cpu;
1964
1965 this_cpu = rt_rq->push_cpu;
1966
1967 /* Paranoid check */
1968 BUG_ON(this_cpu != smp_processor_id());
1969
1970 rq = cpu_rq(this_cpu);
1971 src_rq = rq_of_rt_rq(rt_rq);
1972
1973 again:
1974 if (has_pushable_tasks(rq)) {
1975 raw_spin_lock(&rq->lock);
1976 push_rt_task(rq);
1977 raw_spin_unlock(&rq->lock);
1978 }
1979
1980 /* Pass the IPI to the next rt overloaded queue */
1981 raw_spin_lock(&rt_rq->push_lock);
1982 /*
1983 * If the source queue changed since the IPI went out,
1984 * we need to restart the search from that CPU again.
1985 */
1986 if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1987 rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1988 rt_rq->push_cpu = src_rq->cpu;
1989 }
1990
1991 cpu = find_next_push_cpu(src_rq);
1992
1993 if (cpu >= nr_cpu_ids)
1994 rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1995 raw_spin_unlock(&rt_rq->push_lock);
1996
1997 if (cpu >= nr_cpu_ids)
1998 return;
1999
2000 /*
2001 * It is possible that a restart caused this CPU to be
2002 * chosen again. Don't bother with an IPI, just see if we
2003 * have more to push.
2004 */
2005 if (unlikely(cpu == rq->cpu))
2006 goto again;
2007
2008 /* Try the next RT overloaded CPU */
2009 irq_work_queue_on(&rt_rq->push_work, cpu);
2010 }
2011
2012 static void push_irq_work_func(struct irq_work *work)
2013 {
2014 struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
2015
2016 try_to_push_tasks(rt_rq);
2017 }
2018 #endif /* HAVE_RT_PUSH_IPI */
2019
2020 static void pull_rt_task(struct rq *this_rq)
2021 {
2022 int this_cpu = this_rq->cpu, cpu;
2023 bool resched = false;
2024 struct task_struct *p;
2025 struct rq *src_rq;
2026
2027 if (likely(!rt_overloaded(this_rq)))
2028 return;
2029
2030 /*
2031 * Match the barrier from rt_set_overload(); this guarantees that if we
2032 * see overloaded we must also see the rto_mask bit.
2033 */
2034 smp_rmb();
2035
2036 #ifdef HAVE_RT_PUSH_IPI
2037 if (sched_feat(RT_PUSH_IPI)) {
2038 tell_cpu_to_push(this_rq);
2039 return;
2040 }
2041 #endif
2042
2043 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2044 if (this_cpu == cpu)
2045 continue;
2046
2047 src_rq = cpu_rq(cpu);
2048
2049 /*
2050 * Don't bother taking the src_rq->lock if the next highest
2051 * task is known to be lower-priority than our current task.
2052 * This may look racy, but if this value is about to go
2053 * logically higher, the src_rq will push this task away.
2054 * And if it's going logically lower, we do not care.
2055 */
2056 if (src_rq->rt.highest_prio.next >=
2057 this_rq->rt.highest_prio.curr)
2058 continue;
2059
2060 /*
2061 * We can potentially drop this_rq's lock in
2062 * double_lock_balance, and another CPU could
2063 * alter this_rq
2064 */
2065 double_lock_balance(this_rq, src_rq);
2066
2067 /*
2068 * We can only pull a task that is pushable
2069 * on its rq, and no others.
2070 */
2071 p = pick_highest_pushable_task(src_rq, this_cpu);
2072
2073 /*
2074 * Do we have an RT task that preempts
2075 * the to-be-scheduled task?
2076 */
2077 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2078 WARN_ON(p == src_rq->curr);
2079 WARN_ON(!task_on_rq_queued(p));
2080
2081 /*
2082 * There's a chance that p is higher in priority
2083 * than what's currently running on its cpu.
2084 * This is just that p is waking up and hasn't
2085 * had a chance to schedule. We only pull
2086 * p if it is lower in priority than the
2087 * current task on the run queue
2088 */
2089 if (p->prio < src_rq->curr->prio)
2090 goto skip;
2091
2092 resched = true;
2093
2094 deactivate_task(src_rq, p, 0);
2095 set_task_cpu(p, this_cpu);
2096 activate_task(this_rq, p, 0);
2097 /*
2098 * We continue with the search, just in
2099 * case there's an even higher prio task
2100 * in another runqueue. (low likelihood
2101 * but possible)
2102 */
2103 }
2104 skip:
2105 double_unlock_balance(this_rq, src_rq);
2106 }
2107
2108 if (resched)
2109 resched_curr(this_rq);
2110 }
2111
2112 /*
2113 * If we are not running and we are not going to reschedule soon, we should
2114 * try to push tasks away now
2115 */
2116 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2117 {
2118 if (!task_running(rq, p) &&
2119 !test_tsk_need_resched(rq->curr) &&
2120 p->nr_cpus_allowed > 1 &&
2121 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2122 (rq->curr->nr_cpus_allowed < 2 ||
2123 rq->curr->prio <= p->prio))
2124 push_rt_tasks(rq);
2125 }
2126
2127 /* Assumes rq->lock is held */
2128 static void rq_online_rt(struct rq *rq)
2129 {
2130 if (rq->rt.overloaded)
2131 rt_set_overload(rq);
2132
2133 __enable_runtime(rq);
2134
2135 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2136 }
2137
2138 /* Assumes rq->lock is held */
2139 static void rq_offline_rt(struct rq *rq)
2140 {
2141 if (rq->rt.overloaded)
2142 rt_clear_overload(rq);
2143
2144 __disable_runtime(rq);
2145
2146 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2147 }
2148
2149 /*
2150 * When switching away from the rt queue, we bring ourselves to a position
2151 * where we might want to pull RT tasks from other runqueues.
2152 */
2153 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2154 {
2155 /*
2156 * If there are other RT tasks then we will reschedule
2157 * and the scheduling of the other RT tasks will handle
2158 * the balancing. But if we are the last RT task
2159 * we may need to handle the pulling of RT tasks
2160 * now.
2161 */
2162 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2163 return;
2164
2165 queue_pull_task(rq);
2166 }
2167
2168 void __init init_sched_rt_class(void)
2169 {
2170 unsigned int i;
2171
2172 for_each_possible_cpu(i) {
2173 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2174 GFP_KERNEL, cpu_to_node(i));
2175 }
2176 }
2177 #endif /* CONFIG_SMP */
2178
2179 /*
2180 * When switching a task to RT, we may overload the runqueue
2181 * with RT tasks. In this case we try to push them off to
2182 * other runqueues.
2183 */
2184 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2185 {
2186 /*
2187 * If we are already running, then there's nothing
2188 * that needs to be done. But if we are not running
2189 * we may need to preempt the current running task.
2190 * If that current running task is also an RT task
2191 * then see if we can move to another run queue.
2192 */
2193 if (task_on_rq_queued(p) && rq->curr != p) {
2194 #ifdef CONFIG_SMP
2195 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2196 queue_push_tasks(rq);
2197 #else
2198 if (p->prio < rq->curr->prio)
2199 resched_curr(rq);
2200 #endif /* CONFIG_SMP */
2201 }
2202 }
2203
2204 /*
2205 * Priority of the task has changed. This may cause
2206 * us to initiate a push or pull.
2207 */
2208 static void
2209 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2210 {
2211 if (!task_on_rq_queued(p))
2212 return;
2213
2214 if (rq->curr == p) {
2215 #ifdef CONFIG_SMP
2216 /*
2217 * If our priority decreases while running, we
2218 * may need to pull tasks to this runqueue.
2219 */
2220 if (oldprio < p->prio)
2221 queue_pull_task(rq);
2222
2223 /*
2224 * If there's a higher priority task waiting to run
2225 * then reschedule.
2226 */
2227 if (p->prio > rq->rt.highest_prio.curr)
2228 resched_curr(rq);
2229 #else
2230 /* For UP simply resched on drop of prio */
2231 if (oldprio < p->prio)
2232 resched_curr(rq);
2233 #endif /* CONFIG_SMP */
2234 } else {
2235 /*
2236 * This task is not running, but if it is
2237 * greater than the current running task
2238 * then reschedule.
2239 */
2240 if (p->prio < rq->curr->prio)
2241 resched_curr(rq);
2242 }
2243 }
2244
2245 static void watchdog(struct rq *rq, struct task_struct *p)
2246 {
2247 unsigned long soft, hard;
2248
2249 /* max may change after cur was read, this will be fixed next tick */
2250 soft = task_rlimit(p, RLIMIT_RTTIME);
2251 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2252
2253 if (soft != RLIM_INFINITY) {
2254 unsigned long next;
2255
2256 if (p->rt.watchdog_stamp != jiffies) {
2257 p->rt.timeout++;
2258 p->rt.watchdog_stamp = jiffies;
2259 }
2260
2261 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2262 if (p->rt.timeout > next)
2263 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2264 }
2265 }
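/*
 * Illustrative sketch (editorial addition): the limit this watchdog enforces
 * is the standard RLIMIT_RTTIME rlimit, i.e. microseconds of CPU time an RT
 * task may consume without blocking; exceeding the soft limit delivers
 * SIGXCPU.  A minimal userspace setup, with arbitrary example limits:
 */
#define _GNU_SOURCE		/* RLIMIT_RTTIME on some libc versions */
#include <sys/resource.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct rlimit rl = {
		.rlim_cur = 200000,	/* soft: 200 ms of CPU without blocking -> SIGXCPU */
		.rlim_max = 500000,	/* hard: 500 ms */
	};
	struct sched_param sp = { .sched_priority = 10 };

	if (setrlimit(RLIMIT_RTTIME, &rl) == -1) {
		perror("setrlimit");
		return 1;
	}
	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	/* A busy loop here would now receive SIGXCPU once the soft limit is hit. */
	return 0;
}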
2266
2267 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2268 {
2269 struct sched_rt_entity *rt_se = &p->rt;
2270
2271 update_curr_rt(rq);
2272
2273 watchdog(rq, p);
2274
2275 /*
2276 * RR tasks need a special form of timeslice management.
2277 * FIFO tasks have no timeslices.
2278 */
2279 if (p->policy != SCHED_RR)
2280 return;
2281
2282 if (--p->rt.time_slice)
2283 return;
2284
2285 p->rt.time_slice = sched_rr_timeslice;
2286
2287 /*
2288 * Requeue to the end of queue if we (and all of our ancestors) are not
2289 * the only element on the queue
2290 */
2291 for_each_sched_rt_entity(rt_se) {
2292 if (rt_se->run_list.prev != rt_se->run_list.next) {
2293 requeue_task_rt(rq, p, 0);
2294 resched_curr(rq);
2295 return;
2296 }
2297 }
2298 }
2299
2300 static void set_curr_task_rt(struct rq *rq)
2301 {
2302 struct task_struct *p = rq->curr;
2303
2304 p->se.exec_start = rq_clock_task(rq);
2305
2306 /* The running task is never eligible for pushing */
2307 dequeue_pushable_task(rq, p);
2308 }
2309
2310 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2311 {
2312 /*
2313 * Time slice is 0 for SCHED_FIFO tasks
2314 */
2315 if (task->policy == SCHED_RR)
2316 return sched_rr_timeslice;
2317 else
2318 return 0;
2319 }
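/*
 * Illustrative sketch (editorial addition): the value computed here is what
 * the POSIX sched_rr_get_interval() call reports to userspace.  For a
 * SCHED_RR task it is the RR timeslice; for SCHED_FIFO it is 0.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == -1) {	/* 0 = calling process */
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}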
2320
2321 const struct sched_class rt_sched_class = {
2322 .next = &fair_sched_class,
2323 .enqueue_task = enqueue_task_rt,
2324 .dequeue_task = dequeue_task_rt,
2325 .yield_task = yield_task_rt,
2326
2327 .check_preempt_curr = check_preempt_curr_rt,
2328
2329 .pick_next_task = pick_next_task_rt,
2330 .put_prev_task = put_prev_task_rt,
2331
2332 #ifdef CONFIG_SMP
2333 .select_task_rq = select_task_rq_rt,
2334
2335 .set_cpus_allowed = set_cpus_allowed_common,
2336 .rq_online = rq_online_rt,
2337 .rq_offline = rq_offline_rt,
2338 .task_woken = task_woken_rt,
2339 .switched_from = switched_from_rt,
2340 #endif
2341
2342 .set_curr_task = set_curr_task_rt,
2343 .task_tick = task_tick_rt,
2344
2345 .get_rr_interval = get_rr_interval_rt,
2346
2347 .prio_changed = prio_changed_rt,
2348 .switched_to = switched_to_rt,
2349
2350 .update_curr = update_curr_rt,
2351 };
2352
2353 #ifdef CONFIG_SCHED_DEBUG
2354 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2355
2356 void print_rt_stats(struct seq_file *m, int cpu)
2357 {
2358 rt_rq_iter_t iter;
2359 struct rt_rq *rt_rq;
2360
2361 rcu_read_lock();
2362 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2363 print_rt_rq(m, cpu, rt_rq);
2364 rcu_read_unlock();
2365 }
2366 #endif /* CONFIG_SCHED_DEBUG */