sched: high-res preemption tick
kernel/sched_rt.c
1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6 #ifdef CONFIG_SMP
7
8 static inline int rt_overloaded(struct rq *rq)
9 {
10 return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15 cpu_set(rq->cpu, rq->rd->rto_mask);
16 /*
17 * Make sure the mask is visible before we set
18 * the overload count. That is checked to determine
19 * if we should look at the mask. It would be a shame
20 * if we looked at the mask, but the mask was not
21 * updated yet.
22 */
23 wmb();
24 atomic_inc(&rq->rd->rto_count);
25 }
26
27 static inline void rt_clear_overload(struct rq *rq)
28 {
29 /* the order here really doesn't matter */
30 atomic_dec(&rq->rd->rto_count);
31 cpu_clear(rq->cpu, rq->rd->rto_mask);
32 }
33
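/*
 * Track whether this rq is "RT overloaded": it has more than one runnable
 * RT task and at least one of them may migrate to another CPU. The
 * overload state (and the root domain's rto_mask) drives the push/pull
 * balancing below.
 */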
34 static void update_rt_migration(struct rq *rq)
35 {
36 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
37 if (!rq->rt.overloaded) {
38 rt_set_overload(rq);
39 rq->rt.overloaded = 1;
40 }
41 } else if (rq->rt.overloaded) {
42 rt_clear_overload(rq);
43 rq->rt.overloaded = 0;
44 }
45 }
46 #endif /* CONFIG_SMP */
47
48 /*
49 * Update the current task's runtime statistics. Skip current tasks that
50 * are not in our scheduling class.
51 */
52 static void update_curr_rt(struct rq *rq)
53 {
54 struct task_struct *curr = rq->curr;
55 u64 delta_exec;
56
57 if (!task_has_rt_policy(curr))
58 return;
59
60 delta_exec = rq->clock - curr->se.exec_start;
61 if (unlikely((s64)delta_exec < 0))
62 delta_exec = 0;
63
64 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
65
66 curr->se.sum_exec_runtime += delta_exec;
67 curr->se.exec_start = rq->clock;
68 cpuacct_charge(curr, delta_exec);
69 }
70
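/*
 * inc_rt_tasks()/dec_rt_tasks() keep the per-rq RT bookkeeping up to date
 * on enqueue/dequeue: the runnable count, the highest queued RT priority,
 * and the number of tasks that are allowed to migrate.
 */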
71 static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
72 {
73 WARN_ON(!rt_task(p));
74 rq->rt.rt_nr_running++;
75 #ifdef CONFIG_SMP
76 if (p->prio < rq->rt.highest_prio)
77 rq->rt.highest_prio = p->prio;
78 if (p->nr_cpus_allowed > 1)
79 rq->rt.rt_nr_migratory++;
80
81 update_rt_migration(rq);
82 #endif /* CONFIG_SMP */
83 }
84
85 static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
86 {
87 WARN_ON(!rt_task(p));
88 WARN_ON(!rq->rt.rt_nr_running);
89 rq->rt.rt_nr_running--;
90 #ifdef CONFIG_SMP
91 if (rq->rt.rt_nr_running) {
92 struct rt_prio_array *array;
93
94 WARN_ON(p->prio < rq->rt.highest_prio);
95 if (p->prio == rq->rt.highest_prio) {
96 /* recalculate */
97 array = &rq->rt.active;
98 rq->rt.highest_prio =
99 sched_find_first_bit(array->bitmap);
100 } /* otherwise leave rq->highest_prio alone */
101 } else
102 rq->rt.highest_prio = MAX_RT_PRIO;
103 if (p->nr_cpus_allowed > 1)
104 rq->rt.rt_nr_migratory--;
105
106 update_rt_migration(rq);
107 #endif /* CONFIG_SMP */
108 }
109
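/*
 * Add the task to the tail of the list for its priority and mark that
 * priority in the bitmap. A genuine wakeup also resets the RLIMIT_RTTIME
 * timeout accounting (see watchdog() below).
 */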
110 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
111 {
112 struct rt_prio_array *array = &rq->rt.active;
113
114 list_add_tail(&p->rt.run_list, array->queue + p->prio);
115 __set_bit(p->prio, array->bitmap);
116 inc_cpu_load(rq, p->se.load.weight);
117
118 inc_rt_tasks(p, rq);
119
120 if (wakeup)
121 p->rt.timeout = 0;
122 }
123
124 /*
125 * Adding/removing a task to/from a priority array:
126 */
127 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
128 {
129 struct rt_prio_array *array = &rq->rt.active;
130
131 update_curr_rt(rq);
132
133 list_del(&p->rt.run_list);
134 if (list_empty(array->queue + p->prio))
135 __clear_bit(p->prio, array->bitmap);
136 dec_cpu_load(rq, p->se.load.weight);
137
138 dec_rt_tasks(p, rq);
139 }
140
141 /*
142 * Put the task at the end of the run list without the overhead of a dequeue
143 * followed by enqueue.
144 */
145 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
146 {
147 struct rt_prio_array *array = &rq->rt.active;
148
149 list_move_tail(&p->rt.run_list, array->queue + p->prio);
150 }
151
152 static void
153 yield_task_rt(struct rq *rq)
154 {
155 requeue_task_rt(rq, rq->curr);
156 }
157
158 #ifdef CONFIG_SMP
159 static int find_lowest_rq(struct task_struct *task);
160
161 static int select_task_rq_rt(struct task_struct *p, int sync)
162 {
163 struct rq *rq = task_rq(p);
164
165 /*
166 * If the current task is an RT task, then
167 * try to see if we can wake this RT task up on another
168 * runqueue. Otherwise simply start this RT task
169 * on its current runqueue.
170 *
171 * We want to avoid overloading runqueues, even if
172 * the RT task is of higher priority than the current RT task.
173 * RT tasks behave differently from other tasks: if
174 * one gets preempted, we try to push it off to another queue.
175 * So trying to keep a preempting RT task on the same
176 * cache-hot CPU would force the running RT task onto
177 * a cold CPU, wasting all of the lower-priority RT task's
178 * cache in hopes of saving some for an RT task
179 * that is just being woken and probably has a
180 * cold cache anyway.
181 */
182 if (unlikely(rt_task(rq->curr)) &&
183 (p->nr_cpus_allowed > 1)) {
184 int cpu = find_lowest_rq(p);
185
186 return (cpu == -1) ? task_cpu(p) : cpu;
187 }
188
189 /*
190 * Otherwise, just let it ride on the affined RQ and the
191 * post-schedule router will push the preempted task away
192 */
193 return task_cpu(p);
194 }
195 #endif /* CONFIG_SMP */
196
197 /*
198 * Preempt the current task with a newly woken task if needed:
199 */
200 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
201 {
202 if (p->prio < rq->curr->prio)
203 resched_task(rq->curr);
204 }
205
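/*
 * Pick the next RT task in O(1): find the first set bit in the priority
 * bitmap and take the task at the head of that priority's queue.
 */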
206 static struct task_struct *pick_next_task_rt(struct rq *rq)
207 {
208 struct rt_prio_array *array = &rq->rt.active;
209 struct task_struct *next;
210 struct list_head *queue;
211 int idx;
212
213 idx = sched_find_first_bit(array->bitmap);
214 if (idx >= MAX_RT_PRIO)
215 return NULL;
216
217 queue = array->queue + idx;
218 next = list_entry(queue->next, struct task_struct, rt.run_list);
219
220 next->se.exec_start = rq->clock;
221
222 return next;
223 }
224
225 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
226 {
227 update_curr_rt(rq);
228 p->se.exec_start = 0;
229 }
230
231 #ifdef CONFIG_SMP
232 /* Only try algorithms three times */
233 #define RT_MAX_TRIES 3
234
235 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
236 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
237
238 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
239 {
240 if (!task_running(rq, p) &&
241 (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
242 (p->nr_cpus_allowed > 1))
243 return 1;
244 return 0;
245 }
246
247 /* Return the second highest RT task, NULL otherwise */
248 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
249 {
250 struct rt_prio_array *array = &rq->rt.active;
251 struct task_struct *next;
252 struct list_head *queue;
253 int idx;
254
255 if (likely(rq->rt.rt_nr_running < 2))
256 return NULL;
257
258 idx = sched_find_first_bit(array->bitmap);
259 if (unlikely(idx >= MAX_RT_PRIO)) {
260 WARN_ON(1); /* rt_nr_running is bad */
261 return NULL;
262 }
263
264 queue = array->queue + idx;
265 BUG_ON(list_empty(queue));
266
267 next = list_entry(queue->next, struct task_struct, rt.run_list);
268 if (unlikely(pick_rt_task(rq, next, cpu)))
269 goto out;
270
271 if (queue->next->next != queue) {
272 /* same prio task */
273 next = list_entry(queue->next->next, struct task_struct,
274 rt.run_list);
275 if (pick_rt_task(rq, next, cpu))
276 goto out;
277 }
278
279 retry:
280 /* slower, but more flexible */
281 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
282 if (unlikely(idx >= MAX_RT_PRIO))
283 return NULL;
284
285 queue = array->queue + idx;
286 BUG_ON(list_empty(queue));
287
288 list_for_each_entry(next, queue, rt.run_list) {
289 if (pick_rt_task(rq, next, cpu))
290 goto out;
291 }
292
293 goto retry;
294
295 out:
296 return next;
297 }
298
299 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
300
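/*
 * Build, in @lowest_mask, the set of online CPUs in @task's affinity whose
 * runqueue @task could preempt: CPUs with no queued RT task, or whose
 * highest queued RT priority is lower than @task's. The mask is narrowed
 * towards the lowest-priority runqueues found; the return value is the
 * number of candidates (0 means no suitable CPU).
 */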
301 static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
302 {
303 int lowest_prio = -1;
304 int lowest_cpu = -1;
305 int count = 0;
306 int cpu;
307
308 cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
309
310 /*
311 * Scan each rq for the lowest prio.
312 */
313 for_each_cpu_mask(cpu, *lowest_mask) {
314 struct rq *rq = cpu_rq(cpu);
315
316 /* We look for lowest RT prio or non-rt CPU */
317 if (rq->rt.highest_prio >= MAX_RT_PRIO) {
318 /*
319 * If we already found a low RT queue
320 * and now we find this non-rt queue,
321 * clear the mask and set our bit.
322 * Otherwise just return the queue as is
323 * and count==1 will cause the algorithm
324 * to use the first bit found.
325 */
326 if (lowest_cpu != -1) {
327 cpus_clear(*lowest_mask);
328 cpu_set(rq->cpu, *lowest_mask);
329 }
330 return 1;
331 }
332
333 /* no locking for now */
334 if ((rq->rt.highest_prio > task->prio)
335 && (rq->rt.highest_prio >= lowest_prio)) {
336 if (rq->rt.highest_prio > lowest_prio) {
337 /* new low - clear old data */
338 lowest_prio = rq->rt.highest_prio;
339 lowest_cpu = cpu;
340 count = 0;
341 }
342 count++;
343 } else
344 cpu_clear(cpu, *lowest_mask);
345 }
346
347 /*
348 * Clear out all the set bits that represent
349 * runqueues that were of higher prio than
350 * the lowest_prio.
351 */
352 if (lowest_cpu > 0) {
353 /*
354 * Perhaps we could add another cpumask op to
355 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
356 * Then that could be optimized to use memset and such.
357 */
358 for_each_cpu_mask(cpu, *lowest_mask) {
359 if (cpu >= lowest_cpu)
360 break;
361 cpu_clear(cpu, *lowest_mask);
362 }
363 }
364
365 return count;
366 }
367
368 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
369 {
370 int first;
371
372 /* "this_cpu" is cheaper to preempt than a remote processor */
373 if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
374 return this_cpu;
375
376 first = first_cpu(*mask);
377 if (first != NR_CPUS)
378 return first;
379
380 return -1;
381 }
382
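/*
 * Select a single target CPU from the candidate mask: prefer the CPU the
 * task last ran on (likely cache-hot), then a CPU close to it in the
 * sched-domain topology, and finally any remaining candidate.
 */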
383 static int find_lowest_rq(struct task_struct *task)
384 {
385 struct sched_domain *sd;
386 cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
387 int this_cpu = smp_processor_id();
388 int cpu = task_cpu(task);
389 int count = find_lowest_cpus(task, lowest_mask);
390
391 if (!count)
392 return -1; /* No targets found */
393
394 /*
395 * There is no sense in performing an optimal search if only one
396 * target is found.
397 */
398 if (count == 1)
399 return first_cpu(*lowest_mask);
400
401 /*
402 * At this point we have built a mask of cpus representing the
403 * lowest priority tasks in the system. Now we want to elect
404 * the best one based on our affinity and topology.
405 *
406 * We prioritize the last cpu that the task executed on since
407 * it is most likely cache-hot in that location.
408 */
409 if (cpu_isset(cpu, *lowest_mask))
410 return cpu;
411
412 /*
413 * Otherwise, we consult the sched_domains span maps to figure
414 * out which cpu is logically closest to our hot cache data.
415 */
416 if (this_cpu == cpu)
417 this_cpu = -1; /* Skip this_cpu opt if the same */
418
419 for_each_domain(cpu, sd) {
420 if (sd->flags & SD_WAKE_AFFINE) {
421 cpumask_t domain_mask;
422 int best_cpu;
423
424 cpus_and(domain_mask, sd->span, *lowest_mask);
425
426 best_cpu = pick_optimal_cpu(this_cpu,
427 &domain_mask);
428 if (best_cpu != -1)
429 return best_cpu;
430 }
431 }
432
433 /*
434 * And finally, if there were no matches within the domains
435 * just give the caller *something* to work with from the compatible
436 * locations.
437 */
438 return pick_optimal_cpu(this_cpu, lowest_mask);
439 }
440
441 /* Will lock the rq it finds */
442 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
443 {
444 struct rq *lowest_rq = NULL;
445 int tries;
446 int cpu;
447
448 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
449 cpu = find_lowest_rq(task);
450
451 if ((cpu == -1) || (cpu == rq->cpu))
452 break;
453
454 lowest_rq = cpu_rq(cpu);
455
456 /* if the prio of this runqueue changed, try again */
457 if (double_lock_balance(rq, lowest_rq)) {
458 /*
459 * We had to unlock the run queue. In
460 * the mean time, task could have
461 * migrated already or had its affinity changed.
462 * Also make sure that it wasn't scheduled on its rq.
463 */
464 if (unlikely(task_rq(task) != rq ||
465 !cpu_isset(lowest_rq->cpu,
466 task->cpus_allowed) ||
467 task_running(rq, task) ||
468 !task->se.on_rq)) {
469
470 spin_unlock(&lowest_rq->lock);
471 lowest_rq = NULL;
472 break;
473 }
474 }
475
476 /* If this rq is still suitable use it. */
477 if (lowest_rq->rt.highest_prio > task->prio)
478 break;
479
480 /* try again */
481 spin_unlock(&lowest_rq->lock);
482 lowest_rq = NULL;
483 }
484
485 return lowest_rq;
486 }
487
488 /*
489 * If the current CPU has more than one RT task, see if the non-
490 * running task can migrate over to a CPU that is running a task
491 * of lesser priority.
492 */
493 static int push_rt_task(struct rq *rq)
494 {
495 struct task_struct *next_task;
496 struct rq *lowest_rq;
497 int ret = 0;
498 int paranoid = RT_MAX_TRIES;
499
500 if (!rq->rt.overloaded)
501 return 0;
502
503 next_task = pick_next_highest_task_rt(rq, -1);
504 if (!next_task)
505 return 0;
506
507 retry:
508 if (unlikely(next_task == rq->curr)) {
509 WARN_ON(1);
510 return 0;
511 }
512
513 /*
514 * It's possible that the next_task slipped in with a
515 * higher priority than current. If that's the case,
516 * just reschedule current.
517 */
518 if (unlikely(next_task->prio < rq->curr->prio)) {
519 resched_task(rq->curr);
520 return 0;
521 }
522
523 /* We might release rq lock */
524 get_task_struct(next_task);
525
526 /* find_lock_lowest_rq locks the rq if found */
527 lowest_rq = find_lock_lowest_rq(next_task, rq);
528 if (!lowest_rq) {
529 struct task_struct *task;
530 /*
531 * find_lock_lowest_rq releases rq->lock
532 * so it is possible that next_task has changed.
533 * If it has, then try again.
534 */
535 task = pick_next_highest_task_rt(rq, -1);
536 if (unlikely(task != next_task) && task && paranoid--) {
537 put_task_struct(next_task);
538 next_task = task;
539 goto retry;
540 }
541 goto out;
542 }
543
544 deactivate_task(rq, next_task, 0);
545 set_task_cpu(next_task, lowest_rq->cpu);
546 activate_task(lowest_rq, next_task, 0);
547
548 resched_task(lowest_rq->curr);
549
550 spin_unlock(&lowest_rq->lock);
551
552 ret = 1;
553 out:
554 put_task_struct(next_task);
555
556 return ret;
557 }
558
559 /*
560 * TODO: Currently we just use the second highest prio task on
561 * the queue, and stop when it can't migrate (or there's
562 * no more RT tasks). There may be a case where a lower
563 * priority RT task has a different affinity than the
564 * higher RT task. In this case the lower RT task could
565 * possibly be able to migrate whereas the higher priority
566 * RT task could not. We currently ignore this issue.
567 * Enhancements are welcome!
568 */
569 static void push_rt_tasks(struct rq *rq)
570 {
571 /* push_rt_task will return true if it moved an RT */
572 while (push_rt_task(rq))
573 ;
574 }
575
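/*
 * Pull side of the balancing: when this rq is about to run something of
 * lower priority, scan the overloaded runqueues in the root domain and
 * pull over any queued RT task that would preempt what we are about to
 * run. Returns nonzero if this_rq's next task may have changed (because
 * something was pulled, or stolen while the locks were dropped).
 */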
576 static int pull_rt_task(struct rq *this_rq)
577 {
578 int this_cpu = this_rq->cpu, ret = 0, cpu;
579 struct task_struct *p, *next;
580 struct rq *src_rq;
581
582 if (likely(!rt_overloaded(this_rq)))
583 return 0;
584
585 next = pick_next_task_rt(this_rq);
586
587 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
588 if (this_cpu == cpu)
589 continue;
590
591 src_rq = cpu_rq(cpu);
592 /*
593 * We can potentially drop this_rq's lock in
594 * double_lock_balance, and another CPU could
595 * steal our next task - hence we must cause
596 * the caller to recalculate the next task
597 * in that case:
598 */
599 if (double_lock_balance(this_rq, src_rq)) {
600 struct task_struct *old_next = next;
601
602 next = pick_next_task_rt(this_rq);
603 if (next != old_next)
604 ret = 1;
605 }
606
607 /*
608 * Are there still pullable RT tasks?
609 */
610 if (src_rq->rt.rt_nr_running <= 1) {
611 spin_unlock(&src_rq->lock);
612 continue;
613 }
614
615 p = pick_next_highest_task_rt(src_rq, this_cpu);
616
617 /*
618 * Do we have an RT task that preempts
619 * the to-be-scheduled task?
620 */
621 if (p && (!next || (p->prio < next->prio))) {
622 WARN_ON(p == src_rq->curr);
623 WARN_ON(!p->se.on_rq);
624
625 /*
626 * There's a chance that p is higher in priority
627 * than what's currently running on its cpu.
628 * This is just because p is waking up and hasn't
629 * had a chance to schedule. We only pull
630 * p if it is lower in priority than the
631 * current task on the run queue or
632 * this_rq's next task is lower in prio than
633 * the current task on that rq.
634 */
635 if (p->prio < src_rq->curr->prio ||
636 (next && next->prio < src_rq->curr->prio))
637 goto out;
638
639 ret = 1;
640
641 deactivate_task(src_rq, p, 0);
642 set_task_cpu(p, this_cpu);
643 activate_task(this_rq, p, 0);
644 /*
645 * We continue with the search, just in
646 * case there's an even higher prio task
647 * in another runqueue. (low likelihood
648 * but possible)
649 *
650 * Update next so that we won't pick a task
651 * on another cpu with a priority lower (or equal)
652 * than the one we just picked.
653 */
654 next = p;
655
656 }
657 out:
658 spin_unlock(&src_rq->lock);
659 }
660
661 return ret;
662 }
663
664 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
665 {
666 /* Try to pull RT tasks here if we lower this rq's prio */
667 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
668 pull_rt_task(rq);
669 }
670
671 static void post_schedule_rt(struct rq *rq)
672 {
673 /*
674 * If we have more than one rt_task queued, then
675 * see if we can push the other rt_tasks off to other CPUs.
676 * Note we may release the rq lock, and since
677 * the lock was owned by prev, we need to release it
678 * first via finish_lock_switch and then reacquire it here.
679 */
680 if (unlikely(rq->rt.overloaded)) {
681 spin_lock_irq(&rq->lock);
682 push_rt_tasks(rq);
683 spin_unlock_irq(&rq->lock);
684 }
685 }
686
687
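/*
 * Called after a wakeup: if the woken task will not run here right away
 * (it is not the highest-priority task on this rq) and the rq is
 * overloaded, try to push RT tasks out to other CPUs immediately.
 */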
688 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
689 {
690 if (!task_running(rq, p) &&
691 (p->prio >= rq->rt.highest_prio) &&
692 rq->rt.overloaded)
693 push_rt_tasks(rq);
694 }
695
696 static unsigned long
697 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
698 unsigned long max_load_move,
699 struct sched_domain *sd, enum cpu_idle_type idle,
700 int *all_pinned, int *this_best_prio)
701 {
702 /* don't touch RT tasks */
703 return 0;
704 }
705
706 static int
707 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
708 struct sched_domain *sd, enum cpu_idle_type idle)
709 {
710 /* don't touch RT tasks */
711 return 0;
712 }
713
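/*
 * Affinity changes can turn a migratable RT task into a pinned one (or
 * vice versa), so rt_nr_migratory and the overload state must be updated
 * before the new cpumask is stored.
 */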
714 static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
715 {
716 int weight = cpus_weight(*new_mask);
717
718 BUG_ON(!rt_task(p));
719
720 /*
721 * Update the migration status of the RQ if we have an RT task
722 * which is running AND changing its weight value.
723 */
724 if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
725 struct rq *rq = task_rq(p);
726
727 if ((p->nr_cpus_allowed <= 1) && (weight > 1)) {
728 rq->rt.rt_nr_migratory++;
729 } else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
730 BUG_ON(!rq->rt.rt_nr_migratory);
731 rq->rt.rt_nr_migratory--;
732 }
733
734 update_rt_migration(rq);
735 }
736
737 p->cpus_allowed = *new_mask;
738 p->nr_cpus_allowed = weight;
739 }
740
741 /* Assumes rq->lock is held */
742 static void join_domain_rt(struct rq *rq)
743 {
744 if (rq->rt.overloaded)
745 rt_set_overload(rq);
746 }
747
748 /* Assumes rq->lock is held */
749 static void leave_domain_rt(struct rq *rq)
750 {
751 if (rq->rt.overloaded)
752 rt_clear_overload(rq);
753 }
754
755 /*
756 * When switching away from the rt queue, we bring ourselves to a position
757 * where we might want to pull RT tasks from other runqueues.
758 */
759 static void switched_from_rt(struct rq *rq, struct task_struct *p,
760 int running)
761 {
762 /*
763 * If there are other RT tasks then we will reschedule
764 * and the scheduling of the other RT tasks will handle
765 * the balancing. But if we are the last RT task
766 * we may need to handle the pulling of RT tasks
767 * now.
768 */
769 if (!rq->rt.rt_nr_running)
770 pull_rt_task(rq);
771 }
772 #endif /* CONFIG_SMP */
773
774 /*
775 * When switching a task to RT, we may overload the runqueue
776 * with RT tasks. In this case we try to push them off to
777 * other runqueues.
778 */
779 static void switched_to_rt(struct rq *rq, struct task_struct *p,
780 int running)
781 {
782 int check_resched = 1;
783
784 /*
785 * If we are already running, then there's nothing
786 * that needs to be done. But if we are not running,
787 * we may need to preempt the current running task.
788 * If that current running task is also an RT task,
789 * then see if we can move to another run queue.
790 */
791 if (!running) {
792 #ifdef CONFIG_SMP
793 if (rq->rt.overloaded && push_rt_task(rq) &&
794 /* Don't resched if we changed runqueues */
795 rq != task_rq(p))
796 check_resched = 0;
797 #endif /* CONFIG_SMP */
798 if (check_resched && p->prio < rq->curr->prio)
799 resched_task(rq->curr);
800 }
801 }
802
803 /*
804 * Priority of the task has changed. This may cause
805 * us to initiate a push or pull.
806 */
807 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
808 int oldprio, int running)
809 {
810 if (running) {
811 #ifdef CONFIG_SMP
812 /*
813 * If our priority decreases while running, we
814 * may need to pull tasks to this runqueue.
815 */
816 if (oldprio < p->prio)
817 pull_rt_task(rq);
818 /*
819 * If there's a higher priority task waiting to run
820 * then reschedule.
821 */
822 if (p->prio > rq->rt.highest_prio)
823 resched_task(p);
824 #else
825 /* For UP simply resched on drop of prio */
826 if (oldprio < p->prio)
827 resched_task(p);
828 #endif /* CONFIG_SMP */
829 } else {
830 /*
831 * This task is not running, but if its
832 * priority is higher than the current running task's,
833 * then reschedule.
834 */
835 if (p->prio < rq->curr->prio)
836 resched_task(rq->curr);
837 }
838 }
839
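/*
 * Enforce RLIMIT_RTTIME: p->rt.timeout counts scheduler ticks of RT CPU
 * time since the task last slept, and it_sched_expires is pulled in so
 * the soft limit fires on time. The limit is specified in microseconds.
 * As a rough userspace sketch (illustrative values, not part of this
 * file), a task could bound its own RT CPU time with:
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);	(allow ~0.5s of RT CPU time)
 */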
840 static void watchdog(struct rq *rq, struct task_struct *p)
841 {
842 unsigned long soft, hard;
843
844 if (!p->signal)
845 return;
846
847 soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
848 hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
849
850 if (soft != RLIM_INFINITY) {
851 unsigned long next;
852
853 p->rt.timeout++;
854 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
855 if (next > p->rt.timeout) {
856 u64 next_time = p->se.sum_exec_runtime;
857
858 next_time += next * (NSEC_PER_SEC/HZ);
859 if (p->it_sched_expires > next_time)
860 p->it_sched_expires = next_time;
861 } else
862 p->it_sched_expires = p->se.sum_exec_runtime;
863 }
864 }
865
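/*
 * Scheduler-tick hook: update runtime accounting, run the RLIMIT_RTTIME
 * watchdog, and for SCHED_RR refill the expired timeslice and rotate the
 * task behind its equal-priority peers.
 */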
866 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
867 {
868 update_curr_rt(rq);
869
870 watchdog(rq, p);
871
872 /*
873 * RR tasks need a special form of timeslice management.
874 * FIFO tasks have no timeslices.
875 */
876 if (p->policy != SCHED_RR)
877 return;
878
879 if (--p->rt.time_slice)
880 return;
881
882 p->rt.time_slice = DEF_TIMESLICE;
883
884 /*
885 * Requeue to the end of queue if we are not the only element
886 * on the queue:
887 */
888 if (p->rt.run_list.prev != p->rt.run_list.next) {
889 requeue_task_rt(rq, p);
890 set_tsk_need_resched(p);
891 }
892 }
893
894 static void set_curr_task_rt(struct rq *rq)
895 {
896 struct task_struct *p = rq->curr;
897
898 p->se.exec_start = rq->clock;
899 }
900
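/*
 * The methods above are wired into the scheduler core through this
 * sched_class instance; .next points at the fair class, the next lower
 * class consulted when no RT task is runnable.
 */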
901 const struct sched_class rt_sched_class = {
902 .next = &fair_sched_class,
903 .enqueue_task = enqueue_task_rt,
904 .dequeue_task = dequeue_task_rt,
905 .yield_task = yield_task_rt,
906 #ifdef CONFIG_SMP
907 .select_task_rq = select_task_rq_rt,
908 #endif /* CONFIG_SMP */
909
910 .check_preempt_curr = check_preempt_curr_rt,
911
912 .pick_next_task = pick_next_task_rt,
913 .put_prev_task = put_prev_task_rt,
914
915 #ifdef CONFIG_SMP
916 .load_balance = load_balance_rt,
917 .move_one_task = move_one_task_rt,
918 .set_cpus_allowed = set_cpus_allowed_rt,
919 .join_domain = join_domain_rt,
920 .leave_domain = leave_domain_rt,
921 .pre_schedule = pre_schedule_rt,
922 .post_schedule = post_schedule_rt,
923 .task_wake_up = task_wake_up_rt,
924 .switched_from = switched_from_rt,
925 #endif
926
927 .set_curr_task = set_curr_task_rt,
928 .task_tick = task_tick_rt,
929
930 .prio_changed = prio_changed_rt,
931 .switched_to = switched_to_rt,
932 };