kernel/sched_rt.c
1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6 #ifdef CONFIG_SMP
7
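/* How many runqueues in this rq's root domain are currently RT-overloaded. */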
8 static inline int rt_overloaded(struct rq *rq)
9 {
10 return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15 cpu_set(rq->cpu, rq->rd->rto_mask);
16 /*
17 * Make sure the mask is visible before we set
18 * the overload count. Readers check the count to
19 * decide whether to look at the mask at all, so
20 * the mask must be updated before the count
21 * becomes visible.
22 */
23 wmb();
24 atomic_inc(&rq->rd->rto_count);
25 }
26
27 static inline void rt_clear_overload(struct rq *rq)
28 {
29 /* the order here really doesn't matter */
30 atomic_dec(&rq->rd->rto_count);
31 cpu_clear(rq->cpu, rq->rd->rto_mask);
32 }
33
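/*
 * Set or clear this rq's RT-overload state: it is overloaded when it has
 * more than one runnable RT task and at least one of them can migrate.
 */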
34 static void update_rt_migration(struct rq *rq)
35 {
36 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
37 if (!rq->rt.overloaded) {
38 rt_set_overload(rq);
39 rq->rt.overloaded = 1;
40 }
41 } else if (rq->rt.overloaded) {
42 rt_clear_overload(rq);
43 rq->rt.overloaded = 0;
44 }
45 }
46 #endif /* CONFIG_SMP */
47
48 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
49 {
50 return container_of(rt_se, struct task_struct, rt);
51 }
52
53 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
54 {
55 return !list_empty(&rt_se->run_list);
56 }
57
58 #ifdef CONFIG_RT_GROUP_SCHED
59
60 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
61 {
62 if (!rt_rq->tg)
63 return RUNTIME_INF;
64
65 return rt_rq->rt_runtime;
66 }
67
68 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
69 {
70 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
71 }
72
73 #define for_each_leaf_rt_rq(rt_rq, rq) \
74 list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
75
76 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
77 {
78 return rt_rq->rq;
79 }
80
81 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
82 {
83 return rt_se->rt_rq;
84 }
85
86 #define for_each_sched_rt_entity(rt_se) \
87 for (; rt_se; rt_se = rt_se->parent)
88
89 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
90 {
91 return rt_se->my_q;
92 }
93
94 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
95 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
96
97 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
98 {
99 struct sched_rt_entity *rt_se = rt_rq->rt_se;
100
101 if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
102 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
103
104 enqueue_rt_entity(rt_se);
105 if (rt_rq->highest_prio < curr->prio)
106 resched_task(curr);
107 }
108 }
109
110 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
111 {
112 struct sched_rt_entity *rt_se = rt_rq->rt_se;
113
114 if (rt_se && on_rt_rq(rt_se))
115 dequeue_rt_entity(rt_se);
116 }
117
118 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
119 {
120 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
121 }
122
123 static int rt_se_boosted(struct sched_rt_entity *rt_se)
124 {
125 struct rt_rq *rt_rq = group_rt_rq(rt_se);
126 struct task_struct *p;
127
128 if (rt_rq)
129 return !!rt_rq->rt_nr_boosted;
130
131 p = rt_task_of(rt_se);
132 return p->prio != p->normal_prio;
133 }
134
135 #ifdef CONFIG_SMP
136 static inline cpumask_t sched_rt_period_mask(void)
137 {
138 return cpu_rq(smp_processor_id())->rd->span;
139 }
140 #else
141 static inline cpumask_t sched_rt_period_mask(void)
142 {
143 return cpu_online_map;
144 }
145 #endif
146
147 static inline
148 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
149 {
150 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
151 }
152
153 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
154 {
155 return &rt_rq->tg->rt_bandwidth;
156 }
157
158 #else
159
160 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
161 {
162 return rt_rq->rt_runtime;
163 }
164
165 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
166 {
167 return ktime_to_ns(def_rt_bandwidth.rt_period);
168 }
169
170 #define for_each_leaf_rt_rq(rt_rq, rq) \
171 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
172
173 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
174 {
175 return container_of(rt_rq, struct rq, rt);
176 }
177
178 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
179 {
180 struct task_struct *p = rt_task_of(rt_se);
181 struct rq *rq = task_rq(p);
182
183 return &rq->rt;
184 }
185
186 #define for_each_sched_rt_entity(rt_se) \
187 for (; rt_se; rt_se = NULL)
188
189 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
190 {
191 return NULL;
192 }
193
194 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
195 {
196 }
197
198 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
199 {
200 }
201
202 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
203 {
204 return rt_rq->rt_throttled;
205 }
206
207 static inline cpumask_t sched_rt_period_mask(void)
208 {
209 return cpu_online_map;
210 }
211
212 static inline
213 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
214 {
215 return &cpu_rq(cpu)->rt;
216 }
217
218 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
219 {
220 return &def_rt_bandwidth;
221 }
222
223 #endif
224
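/*
 * Called for each elapsed period of the rt_bandwidth timer: refund up to
 * @overrun periods' worth of runtime to every rt_rq in the period mask and
 * unthrottle/re-enqueue those that are back under their budget. Returns 1
 * if all of them looked idle.
 */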
225 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
226 {
227 int i, idle = 1;
228 cpumask_t span;
229
230 if (rt_b->rt_runtime == RUNTIME_INF)
231 return 1;
232
233 span = sched_rt_period_mask();
234 for_each_cpu_mask(i, span) {
235 int enqueue = 0;
236 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
237 struct rq *rq = rq_of_rt_rq(rt_rq);
238
239 spin_lock(&rq->lock);
240 if (rt_rq->rt_time) {
241 u64 runtime;
242
243 spin_lock(&rt_rq->rt_runtime_lock);
244 runtime = rt_rq->rt_runtime;
245 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
246 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
247 rt_rq->rt_throttled = 0;
248 enqueue = 1;
249 }
250 if (rt_rq->rt_time || rt_rq->rt_nr_running)
251 idle = 0;
252 spin_unlock(&rt_rq->rt_runtime_lock);
253 }
254
255 if (enqueue)
256 sched_rt_rq_enqueue(rt_rq);
257 spin_unlock(&rq->lock);
258 }
259
260 return idle;
261 }
262
263 #ifdef CONFIG_SMP
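/*
 * Try to borrow unused runtime from the other rt_rqs in this root domain so
 * this rt_rq can keep running past its own quota. Returns 1 if any runtime
 * was transferred.
 */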
264 static int balance_runtime(struct rt_rq *rt_rq)
265 {
266 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
267 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
268 int i, weight, more = 0;
269 u64 rt_period;
270
271 weight = cpus_weight(rd->span);
272
273 spin_lock(&rt_b->rt_runtime_lock);
274 rt_period = ktime_to_ns(rt_b->rt_period);
275 for_each_cpu_mask(i, rd->span) {
276 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
277 s64 diff;
278
279 if (iter == rt_rq)
280 continue;
281
282 spin_lock(&iter->rt_runtime_lock);
283 diff = iter->rt_runtime - iter->rt_time;
284 if (diff > 0) {
285 do_div(diff, weight);
286 if (rt_rq->rt_runtime + diff > rt_period)
287 diff = rt_period - rt_rq->rt_runtime;
288 iter->rt_runtime -= diff;
289 rt_rq->rt_runtime += diff;
290 more = 1;
291 if (rt_rq->rt_runtime == rt_period) {
292 spin_unlock(&iter->rt_runtime_lock);
293 break;
294 }
295 }
296 spin_unlock(&iter->rt_runtime_lock);
297 }
298 spin_unlock(&rt_b->rt_runtime_lock);
299
300 return more;
301 }
302 #endif
303
304 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
305 {
306 #ifdef CONFIG_RT_GROUP_SCHED
307 struct rt_rq *rt_rq = group_rt_rq(rt_se);
308
309 if (rt_rq)
310 return rt_rq->highest_prio;
311 #endif
312
313 return rt_task_of(rt_se)->prio;
314 }
315
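/*
 * Check whether this rt_rq has used up its runtime for the current period,
 * borrowing from other CPUs first when possible. Throttles and dequeues the
 * rt_rq and returns 1 if the budget is exhausted.
 */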
316 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
317 {
318 u64 runtime = sched_rt_runtime(rt_rq);
319
320 if (runtime == RUNTIME_INF)
321 return 0;
322
323 if (rt_rq->rt_throttled)
324 return rt_rq_throttled(rt_rq);
325
326 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
327 return 0;
328
329 #ifdef CONFIG_SMP
330 if (rt_rq->rt_time > runtime) {
331 int more;
332
333 spin_unlock(&rt_rq->rt_runtime_lock);
334 more = balance_runtime(rt_rq);
335 spin_lock(&rt_rq->rt_runtime_lock);
336
337 if (more)
338 runtime = sched_rt_runtime(rt_rq);
339 }
340 #endif
341
342 if (rt_rq->rt_time > runtime) {
343 rt_rq->rt_throttled = 1;
344 if (rt_rq_throttled(rt_rq)) {
345 sched_rt_rq_dequeue(rt_rq);
346 return 1;
347 }
348 }
349
350 return 0;
351 }
352
353 /*
354 * Update the current task's runtime statistics. Skip current tasks that
355 * are not in our scheduling class.
356 */
357 static void update_curr_rt(struct rq *rq)
358 {
359 struct task_struct *curr = rq->curr;
360 struct sched_rt_entity *rt_se = &curr->rt;
361 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
362 u64 delta_exec;
363
364 if (!task_has_rt_policy(curr))
365 return;
366
367 delta_exec = rq->clock - curr->se.exec_start;
368 if (unlikely((s64)delta_exec < 0))
369 delta_exec = 0;
370
371 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
372
373 curr->se.sum_exec_runtime += delta_exec;
374 curr->se.exec_start = rq->clock;
375 cpuacct_charge(curr, delta_exec);
376
377 for_each_sched_rt_entity(rt_se) {
378 rt_rq = rt_rq_of_se(rt_se);
379
380 spin_lock(&rt_rq->rt_runtime_lock);
381 rt_rq->rt_time += delta_exec;
382 if (sched_rt_runtime_exceeded(rt_rq))
383 resched_task(curr);
384 spin_unlock(&rt_rq->rt_runtime_lock);
385 }
386 }
387
388 static inline
389 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
390 {
391 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
392 rt_rq->rt_nr_running++;
393 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
394 if (rt_se_prio(rt_se) < rt_rq->highest_prio)
395 rt_rq->highest_prio = rt_se_prio(rt_se);
396 #endif
397 #ifdef CONFIG_SMP
398 if (rt_se->nr_cpus_allowed > 1) {
399 struct rq *rq = rq_of_rt_rq(rt_rq);
400 rq->rt.rt_nr_migratory++;
401 }
402
403 update_rt_migration(rq_of_rt_rq(rt_rq));
404 #endif
405 #ifdef CONFIG_RT_GROUP_SCHED
406 if (rt_se_boosted(rt_se))
407 rt_rq->rt_nr_boosted++;
408
409 if (rt_rq->tg)
410 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
411 #else
412 start_rt_bandwidth(&def_rt_bandwidth);
413 #endif
414 }
415
416 static inline
417 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
418 {
419 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
420 WARN_ON(!rt_rq->rt_nr_running);
421 rt_rq->rt_nr_running--;
422 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
423 if (rt_rq->rt_nr_running) {
424 struct rt_prio_array *array;
425
426 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
427 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
428 /* recalculate */
429 array = &rt_rq->active;
430 rt_rq->highest_prio =
431 sched_find_first_bit(array->bitmap);
432 } /* otherwise leave rt_rq->highest_prio alone */
433 } else
434 rt_rq->highest_prio = MAX_RT_PRIO;
435 #endif
436 #ifdef CONFIG_SMP
437 if (rt_se->nr_cpus_allowed > 1) {
438 struct rq *rq = rq_of_rt_rq(rt_rq);
439 rq->rt.rt_nr_migratory--;
440 }
441
442 update_rt_migration(rq_of_rt_rq(rt_rq));
443 #endif /* CONFIG_SMP */
444 #ifdef CONFIG_RT_GROUP_SCHED
445 if (rt_se_boosted(rt_se))
446 rt_rq->rt_nr_boosted--;
447
448 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
449 #endif
450 }
451
452 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
453 {
454 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
455 struct rt_prio_array *array = &rt_rq->active;
456 struct rt_rq *group_rq = group_rt_rq(rt_se);
457
458 /*
459 * Don't enqueue the group if it's throttled, or when empty.
460 * The latter is a consequence of the former when a child group
461 * gets throttled and the current group doesn't have any other
462 * active members.
463 */
464 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
465 return;
466
467 list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
468 __set_bit(rt_se_prio(rt_se), array->bitmap);
469
470 inc_rt_tasks(rt_se, rt_rq);
471 }
472
473 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
474 {
475 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
476 struct rt_prio_array *array = &rt_rq->active;
477
478 list_del_init(&rt_se->run_list);
479 if (list_empty(array->queue + rt_se_prio(rt_se)))
480 __clear_bit(rt_se_prio(rt_se), array->bitmap);
481
482 dec_rt_tasks(rt_se, rt_rq);
483 }
484
485 /*
486 * Because the prio of an upper entry depends on the lower
487 * entries, we must remove entries top-down.
488 */
489 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
490 {
491 struct sched_rt_entity *back = NULL;
492
493 for_each_sched_rt_entity(rt_se) {
494 rt_se->back = back;
495 back = rt_se;
496 }
497
498 for (rt_se = back; rt_se; rt_se = rt_se->back) {
499 if (on_rt_rq(rt_se))
500 __dequeue_rt_entity(rt_se);
501 }
502 }
503
504 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
505 {
506 dequeue_rt_stack(rt_se);
507 for_each_sched_rt_entity(rt_se)
508 __enqueue_rt_entity(rt_se);
509 }
510
511 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
512 {
513 dequeue_rt_stack(rt_se);
514
515 for_each_sched_rt_entity(rt_se) {
516 struct rt_rq *rt_rq = group_rt_rq(rt_se);
517
518 if (rt_rq && rt_rq->rt_nr_running)
519 __enqueue_rt_entity(rt_se);
520 }
521 }
522
523 /*
524 * Adding/removing a task to/from a priority array:
525 */
526 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
527 {
528 struct sched_rt_entity *rt_se = &p->rt;
529
530 if (wakeup)
531 rt_se->timeout = 0;
532
533 enqueue_rt_entity(rt_se);
534 }
535
536 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
537 {
538 struct sched_rt_entity *rt_se = &p->rt;
539
540 update_curr_rt(rq);
541 dequeue_rt_entity(rt_se);
542 }
543
544 /*
545 * Put task to the end of the run list without the overhead of dequeue
546 * followed by enqueue.
547 */
548 static
549 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
550 {
551 struct rt_prio_array *array = &rt_rq->active;
552 struct list_head *queue = array->queue + rt_se_prio(rt_se);
553
554 if (on_rt_rq(rt_se))
555 list_move_tail(&rt_se->run_list, queue);
556 }
557
558 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
559 {
560 struct sched_rt_entity *rt_se = &p->rt;
561 struct rt_rq *rt_rq;
562
563 for_each_sched_rt_entity(rt_se) {
564 rt_rq = rt_rq_of_se(rt_se);
565 requeue_rt_entity(rt_rq, rt_se);
566 }
567 }
568
569 static void yield_task_rt(struct rq *rq)
570 {
571 requeue_task_rt(rq, rq->curr);
572 }
573
574 #ifdef CONFIG_SMP
575 static int find_lowest_rq(struct task_struct *task);
576
577 static int select_task_rq_rt(struct task_struct *p, int sync)
578 {
579 struct rq *rq = task_rq(p);
580
581 /*
582 * If the current task is an RT task, then
583 * try to see if we can wake this RT task up on another
584 * runqueue. Otherwise simply start this RT task
585 * on its current runqueue.
586 *
587 * We want to avoid overloading runqueues, even if
588 * the woken RT task is of higher priority than the current RT task.
589 * RT tasks behave differently than other tasks: if
590 * one gets preempted, we try to push it off to another queue.
591 * So keeping a preempting RT task on the same
592 * cache-hot CPU would force the running RT task onto
593 * a cold CPU, wasting all of that task's cache in the
594 * hope of saving some cache for an RT task
595 * that is just being woken and probably has a
596 * cold cache anyway.
597 */
598 if (unlikely(rt_task(rq->curr)) &&
599 (p->rt.nr_cpus_allowed > 1)) {
600 int cpu = find_lowest_rq(p);
601
602 return (cpu == -1) ? task_cpu(p) : cpu;
603 }
604
605 /*
606 * Otherwise, just let it ride on the affined RQ and the
607 * post-schedule router will push the preempted task away
608 */
609 return task_cpu(p);
610 }
611 #endif /* CONFIG_SMP */
612
613 /*
614 * Preempt the current task with a newly woken task if needed:
615 */
616 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
617 {
618 if (p->prio < rq->curr->prio)
619 resched_task(rq->curr);
620 }
621
622 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
623 struct rt_rq *rt_rq)
624 {
625 struct rt_prio_array *array = &rt_rq->active;
626 struct sched_rt_entity *next = NULL;
627 struct list_head *queue;
628 int idx;
629
630 idx = sched_find_first_bit(array->bitmap);
631 BUG_ON(idx >= MAX_RT_PRIO);
632
633 queue = array->queue + idx;
634 next = list_entry(queue->next, struct sched_rt_entity, run_list);
635
636 return next;
637 }
638
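/*
 * Walk the group hierarchy from the root rt_rq down to a task entity and
 * return the highest-priority runnable RT task, or NULL if there is none
 * (or the root rt_rq is throttled).
 */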
639 static struct task_struct *pick_next_task_rt(struct rq *rq)
640 {
641 struct sched_rt_entity *rt_se;
642 struct task_struct *p;
643 struct rt_rq *rt_rq;
644
645 rt_rq = &rq->rt;
646
647 if (unlikely(!rt_rq->rt_nr_running))
648 return NULL;
649
650 if (rt_rq_throttled(rt_rq))
651 return NULL;
652
653 do {
654 rt_se = pick_next_rt_entity(rq, rt_rq);
655 BUG_ON(!rt_se);
656 rt_rq = group_rt_rq(rt_se);
657 } while (rt_rq);
658
659 p = rt_task_of(rt_se);
660 p->se.exec_start = rq->clock;
661 return p;
662 }
663
664 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
665 {
666 update_curr_rt(rq);
667 p->se.exec_start = 0;
668 }
669
670 #ifdef CONFIG_SMP
671
672 /* Only try algorithms three times */
673 #define RT_MAX_TRIES 3
674
675 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
676 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
677
678 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
679 {
680 if (!task_running(rq, p) &&
681 (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
682 (p->rt.nr_cpus_allowed > 1))
683 return 1;
684 return 0;
685 }
686
687 /* Return the second highest RT task, NULL otherwise */
688 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
689 {
690 struct task_struct *next = NULL;
691 struct sched_rt_entity *rt_se;
692 struct rt_prio_array *array;
693 struct rt_rq *rt_rq;
694 int idx;
695
696 for_each_leaf_rt_rq(rt_rq, rq) {
697 array = &rt_rq->active;
698 idx = sched_find_first_bit(array->bitmap);
699 next_idx:
700 if (idx >= MAX_RT_PRIO)
701 continue;
702 if (next && next->prio < idx)
703 continue;
704 list_for_each_entry(rt_se, array->queue + idx, run_list) {
705 struct task_struct *p = rt_task_of(rt_se);
706 if (pick_rt_task(rq, p, cpu)) {
707 next = p;
708 break;
709 }
710 }
711 if (!next) {
712 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
713 goto next_idx;
714 }
715 }
716
717 return next;
718 }
719
720 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
721
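/*
 * Build a mask of CPUs (within the task's affinity and this root domain's
 * online CPUs) whose runqueues run at a lower RT priority than @task.
 * Returns 0 if there are no candidates, otherwise a count the caller uses
 * to decide how to pick from the mask.
 */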
722 static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
723 {
724 int lowest_prio = -1;
725 int lowest_cpu = -1;
726 int count = 0;
727 int cpu;
728
729 cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
730
731 /*
732 * Scan each rq for the lowest prio.
733 */
734 for_each_cpu_mask(cpu, *lowest_mask) {
735 struct rq *rq = cpu_rq(cpu);
736
737 /* We look for lowest RT prio or non-rt CPU */
738 if (rq->rt.highest_prio >= MAX_RT_PRIO) {
739 /*
740 * If we already found a low RT queue
741 * and now we find this non-rt queue,
742 * clear the mask and set only our bit.
743 * Otherwise just return the mask as is,
744 * and count==1 will cause the algorithm
745 * to use the first bit found.
746 */
747 if (lowest_cpu != -1) {
748 cpus_clear(*lowest_mask);
749 cpu_set(rq->cpu, *lowest_mask);
750 }
751 return 1;
752 }
753
754 /* no locking for now */
755 if ((rq->rt.highest_prio > task->prio)
756 && (rq->rt.highest_prio >= lowest_prio)) {
757 if (rq->rt.highest_prio > lowest_prio) {
758 /* new low - clear old data */
759 lowest_prio = rq->rt.highest_prio;
760 lowest_cpu = cpu;
761 count = 0;
762 }
763 count++;
764 } else
765 cpu_clear(cpu, *lowest_mask);
766 }
767
768 /*
769 * Clear out all the set bits that represent
770 * runqueues that were of higher prio than
771 * the lowest_prio.
772 */
773 if (lowest_cpu > 0) {
774 /*
775 * Perhaps we could add another cpumask op to
776 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
777 * Then that could be optimized to use memset and such.
778 */
779 for_each_cpu_mask(cpu, *lowest_mask) {
780 if (cpu >= lowest_cpu)
781 break;
782 cpu_clear(cpu, *lowest_mask);
783 }
784 }
785
786 return count;
787 }
788
789 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
790 {
791 int first;
792
793 /* "this_cpu" is cheaper to preempt than a remote processor */
794 if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
795 return this_cpu;
796
797 first = first_cpu(*mask);
798 if (first != NR_CPUS)
799 return first;
800
801 return -1;
802 }
803
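/*
 * Pick the best CPU to push @task to: prefer the CPU it last ran on, then
 * CPUs close to it in the sched-domain topology (with a bias toward this
 * CPU), and finally anything left in the lowest-priority mask. Returns -1
 * if there is no suitable target.
 */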
804 static int find_lowest_rq(struct task_struct *task)
805 {
806 struct sched_domain *sd;
807 cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
808 int this_cpu = smp_processor_id();
809 int cpu = task_cpu(task);
810 int count = find_lowest_cpus(task, lowest_mask);
811
812 if (!count)
813 return -1; /* No targets found */
814
815 /*
816 * There is no sense in performing an optimal search if only one
817 * target is found.
818 */
819 if (count == 1)
820 return first_cpu(*lowest_mask);
821
822 /*
823 * At this point we have built a mask of cpus representing the
824 * lowest priority tasks in the system. Now we want to elect
825 * the best one based on our affinity and topology.
826 *
827 * We prioritize the last cpu that the task executed on since
828 * it is most likely cache-hot in that location.
829 */
830 if (cpu_isset(cpu, *lowest_mask))
831 return cpu;
832
833 /*
834 * Otherwise, we consult the sched_domains span maps to figure
835 * out which cpu is logically closest to our hot cache data.
836 */
837 if (this_cpu == cpu)
838 this_cpu = -1; /* Skip this_cpu opt if the same */
839
840 for_each_domain(cpu, sd) {
841 if (sd->flags & SD_WAKE_AFFINE) {
842 cpumask_t domain_mask;
843 int best_cpu;
844
845 cpus_and(domain_mask, sd->span, *lowest_mask);
846
847 best_cpu = pick_optimal_cpu(this_cpu,
848 &domain_mask);
849 if (best_cpu != -1)
850 return best_cpu;
851 }
852 }
853
854 /*
855 * And finally, if there were no matches within the domains
856 * just give the caller *something* to work with from the compatible
857 * locations.
858 */
859 return pick_optimal_cpu(this_cpu, lowest_mask);
860 }
861
862 /* Will lock the rq it finds */
863 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
864 {
865 struct rq *lowest_rq = NULL;
866 int tries;
867 int cpu;
868
869 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
870 cpu = find_lowest_rq(task);
871
872 if ((cpu == -1) || (cpu == rq->cpu))
873 break;
874
875 lowest_rq = cpu_rq(cpu);
876
877 /* if the prio of this runqueue changed, try again */
878 if (double_lock_balance(rq, lowest_rq)) {
879 /*
880 * We had to unlock the run queue. In
881 * the meantime, the task could have
882 * migrated already or had its affinity changed.
883 * Also make sure that it wasn't scheduled on its rq.
884 */
885 if (unlikely(task_rq(task) != rq ||
886 !cpu_isset(lowest_rq->cpu,
887 task->cpus_allowed) ||
888 task_running(rq, task) ||
889 !task->se.on_rq)) {
890
891 spin_unlock(&lowest_rq->lock);
892 lowest_rq = NULL;
893 break;
894 }
895 }
896
897 /* If this rq is still suitable, use it. */
898 if (lowest_rq->rt.highest_prio > task->prio)
899 break;
900
901 /* try again */
902 spin_unlock(&lowest_rq->lock);
903 lowest_rq = NULL;
904 }
905
906 return lowest_rq;
907 }
908
909 /*
910 * If the current CPU has more than one RT task, see if the non-running
911 * task can migrate over to a CPU that is running a task
912 * of lesser priority.
913 */
914 static int push_rt_task(struct rq *rq)
915 {
916 struct task_struct *next_task;
917 struct rq *lowest_rq;
918 int ret = 0;
919 int paranoid = RT_MAX_TRIES;
920
921 if (!rq->rt.overloaded)
922 return 0;
923
924 next_task = pick_next_highest_task_rt(rq, -1);
925 if (!next_task)
926 return 0;
927
928 retry:
929 if (unlikely(next_task == rq->curr)) {
930 WARN_ON(1);
931 return 0;
932 }
933
934 /*
935 * It's possible that next_task slipped in with a
936 * higher priority than current. If that's the case
937 * just reschedule current.
938 */
939 if (unlikely(next_task->prio < rq->curr->prio)) {
940 resched_task(rq->curr);
941 return 0;
942 }
943
944 /* We might release rq lock */
945 get_task_struct(next_task);
946
947 /* find_lock_lowest_rq locks the rq if found */
948 lowest_rq = find_lock_lowest_rq(next_task, rq);
949 if (!lowest_rq) {
950 struct task_struct *task;
951 /*
952 * find_lock_lowest_rq releases rq->lock
953 * so it is possible that next_task has changed.
954 * If it has, then try again.
955 */
956 task = pick_next_highest_task_rt(rq, -1);
957 if (unlikely(task != next_task) && task && paranoid--) {
958 put_task_struct(next_task);
959 next_task = task;
960 goto retry;
961 }
962 goto out;
963 }
964
965 deactivate_task(rq, next_task, 0);
966 set_task_cpu(next_task, lowest_rq->cpu);
967 activate_task(lowest_rq, next_task, 0);
968
969 resched_task(lowest_rq->curr);
970
971 spin_unlock(&lowest_rq->lock);
972
973 ret = 1;
974 out:
975 put_task_struct(next_task);
976
977 return ret;
978 }
979
980 /*
981 * TODO: Currently we just use the second highest prio task on
982 * the queue, and stop when it can't migrate (or there's
983 * no more RT tasks). There may be a case where a lower
984 * priority RT task has a different affinity than the
985 * higher RT task. In this case the lower RT task could
986 * possibly be able to migrate, whereas the higher priority
987 * RT task could not. We currently ignore this issue.
988 * Enhancements are welcome!
989 */
990 static void push_rt_tasks(struct rq *rq)
991 {
992 /* push_rt_task will return true if it moved an RT task */
993 while (push_rt_task(rq))
994 ;
995 }
996
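/*
 * Pull higher-priority RT tasks from overloaded runqueues in this root
 * domain onto this_rq. Returns 1 if a task was pulled (or if the locally
 * picked next task changed while locks were dropped).
 */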
997 static int pull_rt_task(struct rq *this_rq)
998 {
999 int this_cpu = this_rq->cpu, ret = 0, cpu;
1000 struct task_struct *p, *next;
1001 struct rq *src_rq;
1002
1003 if (likely(!rt_overloaded(this_rq)))
1004 return 0;
1005
1006 next = pick_next_task_rt(this_rq);
1007
1008 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
1009 if (this_cpu == cpu)
1010 continue;
1011
1012 src_rq = cpu_rq(cpu);
1013 /*
1014 * We can potentially drop this_rq's lock in
1015 * double_lock_balance, and another CPU could
1016 * steal our next task - hence we must cause
1017 * the caller to recalculate the next task
1018 * in that case:
1019 */
1020 if (double_lock_balance(this_rq, src_rq)) {
1021 struct task_struct *old_next = next;
1022
1023 next = pick_next_task_rt(this_rq);
1024 if (next != old_next)
1025 ret = 1;
1026 }
1027
1028 /*
1029 * Are there still pullable RT tasks?
1030 */
1031 if (src_rq->rt.rt_nr_running <= 1)
1032 goto skip;
1033
1034 p = pick_next_highest_task_rt(src_rq, this_cpu);
1035
1036 /*
1037 * Do we have an RT task that preempts
1038 * the to-be-scheduled task?
1039 */
1040 if (p && (!next || (p->prio < next->prio))) {
1041 WARN_ON(p == src_rq->curr);
1042 WARN_ON(!p->se.on_rq);
1043
1044 /*
1045 * There's a chance that p is higher in priority
1046 * than what's currently running on its cpu.
1047 * This is just because p is waking up and hasn't
1048 * had a chance to schedule. We only pull
1049 * p if it is lower in priority than the
1050 * current task on the run queue or
1051 * this_rq's next task is lower in prio than
1052 * the current task on that rq.
1053 */
1054 if (p->prio < src_rq->curr->prio ||
1055 (next && next->prio < src_rq->curr->prio))
1056 goto skip;
1057
1058 ret = 1;
1059
1060 deactivate_task(src_rq, p, 0);
1061 set_task_cpu(p, this_cpu);
1062 activate_task(this_rq, p, 0);
1063 /*
1064 * We continue with the search, just in
1065 * case there's an even higher prio task
1066 * in another runqueue. (low likelihood
1067 * but possible)
1068 *
1069 * Update next so that we won't pick a task
1070 * on another cpu with a priority lower than (or
1071 * equal to) the one we just picked.
1072 */
1073 next = p;
1074
1075 }
1076 skip:
1077 spin_unlock(&src_rq->lock);
1078 }
1079
1080 return ret;
1081 }
1082
1083 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1084 {
1085 /* Try to pull RT tasks here if we lower this rq's prio */
1086 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1087 pull_rt_task(rq);
1088 }
1089
1090 static void post_schedule_rt(struct rq *rq)
1091 {
1092 /*
1093 * If we have more than one rt_task queued, then
1094 * see if we can push the other rt_tasks off to other CPUs.
1095 * Note we may release the rq lock, and since
1096 * the lock was owned by prev, we need to release it
1097 * first via finish_lock_switch and then reacquire it here.
1098 */
1099 if (unlikely(rq->rt.overloaded)) {
1100 spin_lock_irq(&rq->lock);
1101 push_rt_tasks(rq);
1102 spin_unlock_irq(&rq->lock);
1103 }
1104 }
1105
1106 /*
1107 * If we are not running and we are not going to reschedule soon, we should
1108 * try to push tasks away now
1109 */
1110 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1111 {
1112 if (!task_running(rq, p) &&
1113 !test_tsk_need_resched(rq->curr) &&
1114 rq->rt.overloaded)
1115 push_rt_tasks(rq);
1116 }
1117
1118 static unsigned long
1119 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1120 unsigned long max_load_move,
1121 struct sched_domain *sd, enum cpu_idle_type idle,
1122 int *all_pinned, int *this_best_prio)
1123 {
1124 /* don't touch RT tasks */
1125 return 0;
1126 }
1127
1128 static int
1129 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1130 struct sched_domain *sd, enum cpu_idle_type idle)
1131 {
1132 /* don't touch RT tasks */
1133 return 0;
1134 }
1135
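/*
 * Update the task's allowed-CPU mask and, if the task is on a runqueue,
 * keep rt_nr_migratory and the rq's RT-overload state in sync with the
 * new mask weight.
 */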
1136 static void set_cpus_allowed_rt(struct task_struct *p,
1137 const cpumask_t *new_mask)
1138 {
1139 int weight = cpus_weight(*new_mask);
1140
1141 BUG_ON(!rt_task(p));
1142
1143 /*
1144 * Update the migration status of the RQ if we have an RT task
1145 * which is running AND changing its weight value.
1146 */
1147 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1148 struct rq *rq = task_rq(p);
1149
1150 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1151 rq->rt.rt_nr_migratory++;
1152 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1153 BUG_ON(!rq->rt.rt_nr_migratory);
1154 rq->rt.rt_nr_migratory--;
1155 }
1156
1157 update_rt_migration(rq);
1158 }
1159
1160 p->cpus_allowed = *new_mask;
1161 p->rt.nr_cpus_allowed = weight;
1162 }
1163
1164 /* Assumes rq->lock is held */
1165 static void join_domain_rt(struct rq *rq)
1166 {
1167 if (rq->rt.overloaded)
1168 rt_set_overload(rq);
1169 }
1170
1171 /* Assumes rq->lock is held */
1172 static void leave_domain_rt(struct rq *rq)
1173 {
1174 if (rq->rt.overloaded)
1175 rt_clear_overload(rq);
1176 }
1177
1178 /*
1179 * When switching from the rt queue, we bring ourselves to a position
1180 * where we might want to pull RT tasks from other runqueues.
1181 */
1182 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1183 int running)
1184 {
1185 /*
1186 * If there are other RT tasks then we will reschedule
1187 * and the scheduling of the other RT tasks will handle
1188 * the balancing. But if we are the last RT task
1189 * we may need to handle the pulling of RT tasks
1190 * now.
1191 */
1192 if (!rq->rt.rt_nr_running)
1193 pull_rt_task(rq);
1194 }
1195 #endif /* CONFIG_SMP */
1196
1197 /*
1198 * When switching a task to RT, we may overload the runqueue
1199 * with RT tasks. In this case we try to push them off to
1200 * other runqueues.
1201 */
1202 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1203 int running)
1204 {
1205 int check_resched = 1;
1206
1207 /*
1208 * If we are already running, then there's nothing
1209 * that needs to be done. But if we are not running
1210 * we may need to preempt the current running task.
1211 * If that current running task is also an RT task
1212 * then see if we can move to another run queue.
1213 */
1214 if (!running) {
1215 #ifdef CONFIG_SMP
1216 if (rq->rt.overloaded && push_rt_task(rq) &&
1217 /* Don't resched if we changed runqueues */
1218 rq != task_rq(p))
1219 check_resched = 0;
1220 #endif /* CONFIG_SMP */
1221 if (check_resched && p->prio < rq->curr->prio)
1222 resched_task(rq->curr);
1223 }
1224 }
1225
1226 /*
1227 * Priority of the task has changed. This may cause
1228 * us to initiate a push or pull.
1229 */
1230 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1231 int oldprio, int running)
1232 {
1233 if (running) {
1234 #ifdef CONFIG_SMP
1235 /*
1236 * If our priority decreases while running, we
1237 * may need to pull tasks to this runqueue.
1238 */
1239 if (oldprio < p->prio)
1240 pull_rt_task(rq);
1241 /*
1242 * If there's a higher priority task waiting to run
1243 * then reschedule. Note, the above pull_rt_task
1244 * can release the rq lock and p could migrate.
1245 * Only reschedule if p is still on the same runqueue.
1246 */
1247 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1248 resched_task(p);
1249 #else
1250 /* For UP simply resched on drop of prio */
1251 if (oldprio < p->prio)
1252 resched_task(p);
1253 #endif /* CONFIG_SMP */
1254 } else {
1255 /*
1256 * This task is not running, but if its priority is
1257 * higher than that of the current running task,
1258 * then reschedule.
1259 */
1260 if (p->prio < rq->curr->prio)
1261 resched_task(rq->curr);
1262 }
1263 }
1264
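/*
 * RLIMIT_RTTIME watchdog: count how long the task has run without sleeping
 * and, once it exceeds the soft limit, arm it_sched_expires so the overrun
 * is dealt with.
 */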
1265 static void watchdog(struct rq *rq, struct task_struct *p)
1266 {
1267 unsigned long soft, hard;
1268
1269 if (!p->signal)
1270 return;
1271
1272 soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1273 hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1274
1275 if (soft != RLIM_INFINITY) {
1276 unsigned long next;
1277
1278 p->rt.timeout++;
1279 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1280 if (p->rt.timeout > next)
1281 p->it_sched_expires = p->se.sum_exec_runtime;
1282 }
1283 }
1284
1285 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1286 {
1287 update_curr_rt(rq);
1288
1289 watchdog(rq, p);
1290
1291 /*
1292 * RR tasks need a special form of timeslice management.
1293 * FIFO tasks have no timeslices.
1294 */
1295 if (p->policy != SCHED_RR)
1296 return;
1297
1298 if (--p->rt.time_slice)
1299 return;
1300
1301 p->rt.time_slice = DEF_TIMESLICE;
1302
1303 /*
1304 * Requeue to the end of queue if we are not the only element
1305 * on the queue:
1306 */
1307 if (p->rt.run_list.prev != p->rt.run_list.next) {
1308 requeue_task_rt(rq, p);
1309 set_tsk_need_resched(p);
1310 }
1311 }
1312
1313 static void set_curr_task_rt(struct rq *rq)
1314 {
1315 struct task_struct *p = rq->curr;
1316
1317 p->se.exec_start = rq->clock;
1318 }
1319
1320 static const struct sched_class rt_sched_class = {
1321 .next = &fair_sched_class,
1322 .enqueue_task = enqueue_task_rt,
1323 .dequeue_task = dequeue_task_rt,
1324 .yield_task = yield_task_rt,
1325 #ifdef CONFIG_SMP
1326 .select_task_rq = select_task_rq_rt,
1327 #endif /* CONFIG_SMP */
1328
1329 .check_preempt_curr = check_preempt_curr_rt,
1330
1331 .pick_next_task = pick_next_task_rt,
1332 .put_prev_task = put_prev_task_rt,
1333
1334 #ifdef CONFIG_SMP
1335 .load_balance = load_balance_rt,
1336 .move_one_task = move_one_task_rt,
1337 .set_cpus_allowed = set_cpus_allowed_rt,
1338 .join_domain = join_domain_rt,
1339 .leave_domain = leave_domain_rt,
1340 .pre_schedule = pre_schedule_rt,
1341 .post_schedule = post_schedule_rt,
1342 .task_wake_up = task_wake_up_rt,
1343 .switched_from = switched_from_rt,
1344 #endif
1345
1346 .set_curr_task = set_curr_task_rt,
1347 .task_tick = task_tick_rt,
1348
1349 .prio_changed = prio_changed_rt,
1350 .switched_to = switched_to_rt,
1351 };