Merge branch 'parisc-4.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller...
[deliverable/linux.git] / kernel / sched / deadline.c
index cd64c979d0e1857aceb8f9f4383c507ad5fa6809..affd97ec9f65a0c1b9751ee9f1d39cf77757b0aa 100644 (file)
@@ -352,7 +352,15 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
 
-       WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
+       WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
+
+       /*
+        * We are racing with the deadline timer. So, do nothing because
+        * the deadline timer handler will take care of properly recharging
+        * the runtime and postponing the deadline.
+        */
+       if (dl_se->dl_throttled)
+               return;
 
        /*
         * We use the regular wall clock time to set deadlines in the
@@ -361,7 +369,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
         */
        dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
        dl_se->runtime = pi_se->dl_runtime;
-       dl_se->dl_new = 0;
 }
 
 /*
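Note on the new WARN_ON in this hunk: dl_time_before() is the kernel's wrap-safe clock comparison (defined in include/linux/sched/deadline.h), so the warning fires when the entity's absolute deadline is still in the future, i.e. when there was no reason to set up a new instance. A minimal userspace sketch of that comparison:

#include <stdint.h>

/* Wrap-safe "a happens before b" for 64-bit nanosecond clocks: taking
 * the signed difference keeps the result correct across wraparound. */
static inline int dl_time_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}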
@@ -399,6 +406,9 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                dl_se->runtime = pi_se->dl_runtime;
        }
 
+       if (dl_se->dl_yielded && dl_se->runtime > 0)
+               dl_se->runtime = 0;
+
        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
@@ -420,7 +430,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
-               printk_deferred_once("sched: DL replenish lagged to much\n");
+               printk_deferred_once("sched: DL replenish lagged too much\n");
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
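The "keep moving the deadline away" comment refers to the replenishment loop in this hunk's surrounding context. A simplified, self-contained sketch of that loop; dl_mock and its fields are illustrative stand-ins for sched_dl_entity, not kernel types:

#include <stdint.h>

struct dl_mock {
        int64_t  runtime;   /* remaining budget (ns); may go negative */
        uint64_t deadline;  /* absolute deadline (ns) */
};

/* Push the deadline forward one period at a time, adding one period's
 * worth of budget per step, until the entity has runtime available. */
static void replenish(struct dl_mock *dl, uint64_t dl_period,
                      int64_t dl_runtime)
{
        while (dl->runtime <= 0) {
                dl->deadline += dl_period;
                dl->runtime  += dl_runtime;
        }
}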
@@ -500,15 +510,6 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
 
-       /*
-        * The arrival of a new instance needs special treatment, i.e.,
-        * the actual scheduling parameters have to be "renewed".
-        */
-       if (dl_se->dl_new) {
-               setup_new_dl_entity(dl_se, pi_se);
-               return;
-       }
-
        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
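With the dl_new special case gone, update_dl_entity() always applies the CBS wakeup rule: start a fresh instance if the old deadline has passed, or if keeping the residual (runtime, deadline) pair would exceed the task's reserved bandwidth. Ignoring the scaling the real dl_entity_overflow() does to avoid 64-bit overflow, the check reduces to a cross-multiplied ratio test; a hedged sketch, assuming a non-negative residual runtime:

#include <stdint.h>

/* True when runtime / (deadline - now) > dl_runtime / dl_period, i.e.
 * recycling the current parameters would exceed the reserved bandwidth. */
static int dl_entity_overflow(uint64_t runtime, uint64_t deadline,
                              uint64_t now, uint64_t dl_runtime,
                              uint64_t dl_period)
{
        return runtime * dl_period > (deadline - now) * dl_runtime;
}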
@@ -604,16 +605,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                goto unlock;
        }
 
-       /*
-        * This is possible if switched_from_dl() raced against a running
-        * callback that took the above !dl_task() path and we've since then
-        * switched back into SCHED_DEADLINE.
-        *
-        * There's nothing to do except drop our task reference.
-        */
-       if (dl_se->dl_new)
-               goto unlock;
-
        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path, it's not throttled.
@@ -726,6 +717,10 @@ static void update_curr_dl(struct rq *rq)
        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;
 
+       /* Kick cpufreq (see the comment in linux/cpufreq.h). */
+       if (cpu_of(rq) == smp_processor_id())
+               cpufreq_trigger_update(rq_clock(rq));
+
        /*
         * Consumed budget is computed considering the time as
         * observed by schedulable tasks (excluding time spent
@@ -735,8 +730,11 @@ static void update_curr_dl(struct rq *rq)
         * approach need further study.
         */
        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
-       if (unlikely((s64)delta_exec <= 0))
+       if (unlikely((s64)delta_exec <= 0)) {
+               if (unlikely(dl_se->dl_yielded))
+                       goto throttle;
                return;
+       }
 
        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));
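delta_exec here is measured against rq_clock_task(), which, as the comment above notes, counts only time observed by schedulable tasks, while absolute deadlines keep using plain wall-clock rq_clock(). A tiny model of this per-update service accounting (account() and its parameter names are illustrative, not kernel API):

#include <stdint.h>

/* Charge only the service time elapsed since the last update, and
 * restart the measurement window; non-positive deltas charge nothing. */
static int64_t account(uint64_t clock_task, uint64_t *exec_start)
{
        int64_t delta_exec = (int64_t)(clock_task - *exec_start);

        if (delta_exec <= 0)
                return 0;
        *exec_start = clock_task;
        return delta_exec;
}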
@@ -749,8 +747,10 @@ static void update_curr_dl(struct rq *rq)
 
        sched_rt_avg_update(rq, delta_exec);
 
-       dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
-       if (dl_runtime_exceeded(dl_se)) {
+       dl_se->runtime -= delta_exec;
+
+throttle:
+       if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
                dl_se->dl_throttled = 1;
                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
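dl_runtime_exceeded() is simply a budget check (runtime <= 0), so after this change a task that called sched_yield() takes the same path as one that exhausted its budget: it is throttled and dequeued, and the deadline timer armed by start_dl_timer() replenishes it at the start of its next period. A compact userspace model of the charge-and-throttle flow, using the mock struct rather than the kernel's:

#include <stdint.h>

struct dl_mock {
        int64_t runtime;
        int     dl_yielded;
        int     dl_throttled;
};

static int dl_runtime_exceeded(const struct dl_mock *dl)
{
        return dl->runtime <= 0;   /* budget exhausted or overdrawn */
}

/* Charge the elapsed service time, then throttle on exhaustion *or*
 * an explicit yield, mirroring the combined condition above. */
static void charge(struct dl_mock *dl, int64_t delta_exec)
{
        dl->runtime -= delta_exec;
        if (dl_runtime_exceeded(dl) || dl->dl_yielded)
                dl->dl_throttled = 1;
}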
@@ -917,7 +917,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
-       if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
+       if (flags & ENQUEUE_WAKEUP)
                update_dl_entity(dl_se, pi_se);
        else if (flags & ENQUEUE_REPLENISH)
                replenish_dl_entity(dl_se, pi_se);
@@ -994,18 +994,14 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
  */
 static void yield_task_dl(struct rq *rq)
 {
-       struct task_struct *p = rq->curr;
-
        /*
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
         * new scheduling parameters (thanks to dl_yielded=1).
         */
-       if (p->dl.runtime > 0) {
-               rq->curr->dl.dl_yielded = 1;
-               p->dl.runtime = 0;
-       }
+       rq->curr->dl.dl_yielded = 1;
+
        update_rq_clock(rq);
        update_curr_dl(rq);
        /*
@@ -1722,6 +1718,9 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
+       if (dl_time_before(p->dl.deadline, rq_clock(rq)))
+               setup_new_dl_entity(&p->dl, &p->dl);
+
        if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
                if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
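The new check in switched_to_dl() covers a task re-entering SCHED_DEADLINE with a stale absolute deadline: if it has already passed, setup_new_dl_entity() hands out a fresh deadline and full runtime. A worked example with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t now         = 150000000;  /* rq_clock: 150 ms, in ns */
        uint64_t deadline    = 100000000;  /* stale absolute deadline: 100 ms */
        uint64_t dl_deadline =  30000000;  /* relative deadline: 30 ms */

        /* dl_time_before(deadline, now): the old deadline has passed,
         * so start a fresh instance, as setup_new_dl_entity() does. */
        if ((int64_t)(deadline - now) < 0)
                deadline = now + dl_deadline;   /* -> 180 ms */

        printf("new deadline: %llu ns\n", (unsigned long long)deadline);
        return 0;
}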
@@ -1768,8 +1767,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                 */
                resched_curr(rq);
 #endif /* CONFIG_SMP */
-       } else
-               switched_to_dl(rq, p);
+       }
 }
 
 const struct sched_class dl_sched_class = {