nohz: Re-evaluate the tick for the new task after a context switch
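On a full dynticks (CONFIG_NO_HZ_FULL) CPU the tick may have been stopped while a single task was running. When a context switch brings in a new task, or a remote CPU enqueues work here, the tick dependency has to be re-evaluated: with more than one runnable task, preemption needs the tick. This change adds the sched_can_stop_tick() helper, lets the scheduler IPI through on full dynticks CPUs so it can re-check the tick, and re-evaluates the tick for the incoming task at the end of a context switch.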
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0f0a5b3fd62c8c6ddeffc2d0ee1ec840f596eafc..dd09def88567bf9f418825c0f684b105b0667be7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -650,6 +650,24 @@ static inline bool got_nohz_idle_kick(void)
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
+#ifdef CONFIG_NO_HZ_FULL
+bool sched_can_stop_tick(void)
+{
+       struct rq *rq;
+
+       rq = this_rq();
+
+       /* Make sure rq->nr_running update is visible after the IPI */
+       smp_rmb();
+
+       /* More than one running task needs preemption */
+       if (rq->nr_running > 1)
+               return false;
+
+       return true;
+}
+#endif /* CONFIG_NO_HZ_FULL */
+
 void sched_avg_update(struct rq *rq)
 {
        s64 period = sched_avg_period();
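The smp_rmb() above pairs with a write-side barrier on the CPU that updates rq->nr_running before kicking this CPU with an IPI. A minimal sketch of that pairing, modeled on the companion inc_nr_running() change in kernel/sched/sched.h (reconstructed from memory, so treat the exact body as an assumption):

/* Sketch of the write side: runs on the CPU that enqueues the task. */
static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;

#ifdef CONFIG_NO_HZ_FULL
	if (rq->nr_running == 2 && tick_nohz_full_cpu(rq->cpu)) {
		/*
		 * Order the rq->nr_running store before the IPI so that
		 * sched_can_stop_tick()'s smp_rmb() observes the update.
		 */
		smp_wmb();
		smp_send_reschedule(rq->cpu);
	}
#endif
}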
@@ -1380,7 +1398,8 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
+       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
+           && !tick_nohz_full_cpu(smp_processor_id()))
                return;
 
        /*
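The extra !tick_nohz_full_cpu() test keeps the IPI from being dismissed as spurious on a full dynticks CPU: even with an empty wake_list and no idle kick, such a CPU must fall through to irq_enter() and the tick re-check below. tick_nohz_full_cpu() is essentially a cpumask test; a sketch under the assumption that it follows the kernel/time/tick-sched.c shape of that era (the flag and mask names are assumptions):

/* Sketch: true when @cpu runs in full dynticks mode (nohz_full= mask). */
static bool tick_nohz_full_cpu(int cpu)
{
	if (!tick_nohz_full_running)	/* assumed flag name */
		return false;
	return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}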
@@ -1397,6 +1416,7 @@ void scheduler_ipi(void)
         * somewhat pessimize the simple resched case.
         */
        irq_enter();
+       tick_nohz_full_check();
        sched_ttwu_pending();
 
        /*
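tick_nohz_full_check() runs after irq_enter(), so it is safe to restart the tick from here if it is needed again. Conceptually it ties back to sched_can_stop_tick(); a hedged sketch, not the actual kernel body, with tick_nohz_restart_tick() standing in as a hypothetical helper name:

/* Conceptual sketch of the re-check triggered by the scheduler IPI. */
void tick_nohz_full_check(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	if (tick_nohz_tick_stopped() && !sched_can_stop_tick())
		tick_nohz_restart_tick();	/* hypothetical helper */
}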
@@ -1876,6 +1896,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
                kprobe_flush_task(prev);
                put_task_struct(prev);
        }
+
+       tick_nohz_task_switch(current);
 }
 
 #ifdef CONFIG_SMP
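tick_nohz_task_switch(current) re-evaluates, for the task that is now running, whether the tick can stay stopped. A sketch of the companion kernel/time/tick-sched.c helper, reconstructed from memory, so the exact body should be treated as an assumption:

/* Sketch of the tick-sched side (assumed shape, names from memory). */
void tick_nohz_task_switch(struct task_struct *tsk)
{
	unsigned long flags;

	/* Called from finish_task_switch(); guard against interrupts. */
	local_irq_save(flags);

	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    tick_nohz_tick_stopped() && !sched_can_stop_tick())
		tick_nohz_full_kick();	/* assumed: self-kick to restart the tick */

	local_irq_restore(flags);
}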