sched: Use resched IPI to kick off the nohz idle balance
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d201f28c1de701b7d0c4651851a83aa4f8bad3b1..6c5fa1099229191e98e4daec7e01c8f6b8dac70f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1052,6 +1052,8 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
                __clear_buddies_skip(se);
 }
 
+static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -1090,6 +1092,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        if (!(flags & DEQUEUE_SLEEP))
                se->vruntime -= cfs_rq->min_vruntime;
 
+       /* return excess runtime on last dequeue */
+       return_cfs_rq_runtime(cfs_rq);
+
        update_min_vruntime(cfs_rq);
        update_cfs_shares(cfs_rq);
 }
@@ -1101,6 +1106,8 @@ static void
 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
        unsigned long ideal_runtime, delta_exec;
+       struct sched_entity *se;
+       s64 delta;
 
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
@@ -1122,16 +1129,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        if (delta_exec < sysctl_sched_min_granularity)
                return;
 
-       if (cfs_rq->nr_running > 1) {
-               struct sched_entity *se = __pick_first_entity(cfs_rq);
-               s64 delta = curr->vruntime - se->vruntime;
+       se = __pick_first_entity(cfs_rq);
+       delta = curr->vruntime - se->vruntime;
 
-               if (delta < 0)
-                       return;
+       if (delta < 0)
+               return;
 
-               if (delta > ideal_runtime)
-                       resched_task(rq_of(cfs_rq)->curr);
-       }
+       if (delta > ideal_runtime)
+               resched_task(rq_of(cfs_rq)->curr);
 }
 
 static void
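Note: the nr_running > 1 guard dropped above is redundant rather than load-bearing, since the only caller already performs that check before invoking check_preempt_tick(), so __pick_first_entity() cannot return NULL here. A simplified sketch of that calling site (entity_tick(); the real function also updates shares and handles hrtick, so treat the exact body as an assumption):

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/* update run-time statistics of the 'current' entity */
	update_curr(cfs_rq);

	/* preemption checks only matter when there is competition */
	if (cfs_rq->nr_running > 1)
		check_preempt_tick(cfs_rq, curr);
}
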
@@ -1674,6 +1679,108 @@ out_unlock:
        return idle;
 }
 
+/* a cfs_rq won't donate quota below this amount */
+static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
+/* minimum remaining period time to redistribute slack quota */
+static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
+/* how long we wait to gather additional slack before distributing */
+static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
+
+/* are we near the end of the current quota period? */
+static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
+{
+       struct hrtimer *refresh_timer = &cfs_b->period_timer;
+       u64 remaining;
+
+       /* if the call-back is running a quota refresh is already occurring */
+       if (hrtimer_callback_running(refresh_timer))
+               return 1;
+
+       /* is a quota refresh about to occur? */
+       remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
+       if (remaining < min_expire)
+               return 1;
+
+       return 0;
+}
+
+static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
+{
+       u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
+
+       /* if there's a quota refresh soon don't bother with slack */
+       if (runtime_refresh_within(cfs_b, min_left))
+               return;
+
+       start_bandwidth_timer(&cfs_b->slack_timer,
+                               ns_to_ktime(cfs_bandwidth_slack_period));
+}
+
+/* we know any runtime found here is valid as update_curr() precedes return */
+static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+       struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+       s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
+
+       if (slack_runtime <= 0)
+               return;
+
+       raw_spin_lock(&cfs_b->lock);
+       if (cfs_b->quota != RUNTIME_INF &&
+           cfs_rq->runtime_expires == cfs_b->runtime_expires) {
+               cfs_b->runtime += slack_runtime;
+
+               /* we are under rq->lock, defer unthrottling using a timer */
+               if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
+                   !list_empty(&cfs_b->throttled_cfs_rq))
+                       start_cfs_slack_bandwidth(cfs_b);
+       }
+       raw_spin_unlock(&cfs_b->lock);
+
+       /* even if it's not valid for return we don't want to try again */
+       cfs_rq->runtime_remaining -= slack_runtime;
+}
+
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+       if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+               return;
+
+       __return_cfs_rq_runtime(cfs_rq);
+}
+
+/*
+ * This is done with a timer (instead of inline with bandwidth return) since
+ * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
+ */
+static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
+{
+       u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+       u64 expires;
+
+       /* confirm we're still not at a refresh boundary */
+       if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
+               return;
+
+       raw_spin_lock(&cfs_b->lock);
+       if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
+               runtime = cfs_b->runtime;
+               cfs_b->runtime = 0;
+       }
+       expires = cfs_b->runtime_expires;
+       raw_spin_unlock(&cfs_b->lock);
+
+       if (!runtime)
+               return;
+
+       runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
+
+       raw_spin_lock(&cfs_b->lock);
+       if (expires == cfs_b->runtime_expires)
+               cfs_b->runtime = runtime;
+       raw_spin_unlock(&cfs_b->lock);
+}
+
 /*
  * When a group wakes up we want to make sure that its quota is not already
  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
@@ -1715,6 +1822,7 @@ static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
                                     unsigned long delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
+static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
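For context, the slack timer armed by start_cfs_slack_bandwidth() is a per-cfs_bandwidth hrtimer whose handler simply forwards into do_sched_cfs_slack_timer() above. A minimal sketch of that glue, assuming the timer setup lives with the rest of the bandwidth code in kernel/sched.c (the function names here mirror the period timer and are otherwise an assumption):

static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
{
	struct cfs_bandwidth *cfs_b =
		container_of(timer, struct cfs_bandwidth, slack_timer);

	/* redistribute whatever slack has accumulated since the timer was armed */
	do_sched_cfs_slack_timer(cfs_b);

	/* one-shot: re-armed from __return_cfs_rq_runtime() when needed */
	return HRTIMER_NORESTART;
}

static void init_cfs_bandwidth_slack(struct cfs_bandwidth *cfs_b)
{
	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cfs_b->slack_timer.function = sched_cfs_slack_timer;
}
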
@@ -4161,22 +4269,6 @@ out_unlock:
 }
 
 #ifdef CONFIG_NO_HZ
-
-static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
-
-static void trigger_sched_softirq(void *data)
-{
-       raise_softirq_irqoff(SCHED_SOFTIRQ);
-}
-
-static inline void init_sched_softirq_csd(struct call_single_data *csd)
-{
-       csd->func = trigger_sched_softirq;
-       csd->info = NULL;
-       csd->flags = 0;
-       csd->priv = 0;
-}
-
 /*
  * idle load balancing details
  * - One of the idle CPUs nominates itself as idle load_balancer, while
@@ -4342,11 +4434,16 @@ static void nohz_balancer_kick(int cpu)
        }
 
        if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
-               struct call_single_data *cp;
-
                cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
-               cp = &per_cpu(remote_sched_softirq_cb, cpu);
-               __smp_call_function_single(ilb_cpu, cp, 0);
+
+               smp_mb();
+               /*
+                * Use smp_send_reschedule() instead of resched_cpu().
+                * This way we generate a sched IPI on the target cpu which
+                * is idle. And the softirq performing nohz idle load balance
+                * will be run before returning from the IPI.
+                */
+               smp_send_reschedule(ilb_cpu);
        }
        return;
 }
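On the receiving side, the plain reschedule IPI lands in scheduler_ipi(); the idle target is expected to notice nohz_balance_kick and raise SCHED_SOFTIRQ from the IPI path, so the nohz idle balance (run_rebalance_domains()) runs before the CPU returns to idle. A rough sketch of that handling, with got_nohz_kick() as a hypothetical helper reading rq->nohz_balance_kick; the actual hook in kernel/sched.c also deals with pending wakeups and may differ in detail:

static inline bool got_nohz_kick(void)
{
	/* hypothetical helper: were we kicked to run the nohz idle balance? */
	return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick;
}

void scheduler_ipi(void)
{
	if (!got_nohz_kick())
		return;	/* nothing beyond the implicit need_resched check */

	/*
	 * The reschedule IPI handler does not run in a softirq-capable
	 * interrupt context by default, so enter one explicitly before
	 * raising the softirq that performs the idle load balance.
	 */
	irq_enter();
	raise_softirq_irqoff(SCHED_SOFTIRQ);
	irq_exit();
}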