sched: Drop the rq argument to sched_class::select_task_rq()
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 60f9d407c5ec09c39d55df51e7e363ba90118d19..96b2c95ac356b950f36ca172295b02c587e0956e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1657,7 +1657,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
  * preempt must be disabled.
  */
 static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
        struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
        int cpu = smp_processor_id();
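With the runqueue argument gone, the fair policy works purely from the task and the wake flags, deriving the current CPU itself via smp_processor_id(). For orientation, this is roughly the shape of the corresponding sched_class hook after the change (a paraphrased, declaration-only sketch with an illustrative struct name, not an excerpt from this patch):

	/* Sketch only: the real struct sched_class has many more callbacks. */
	struct rq;
	struct task_struct;

	struct sched_class_sketch {
		/* before: int (*select_task_rq)(struct rq *rq, struct task_struct *p,
		 *                               int sd_flag, int wake_flags); */
		int (*select_task_rq)(struct task_struct *p, int sd_flag,
				      int wake_flags);
	};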
@@ -1789,10 +1789,7 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
         * This is especially important for buddies when the leftmost
         * task is higher priority than the buddy.
         */
-       if (unlikely(se->load.weight != NICE_0_LOAD))
-               gran = calc_delta_fair(gran, se);
-
-       return gran;
+       return calc_delta_fair(gran, se);
 }
 
 /*
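The caller-side NICE_0_LOAD test dropped above is redundant: calc_delta_fair() performs the same fast-path check internally and hands the granularity back unchanged for a nice-0 entity. A standalone model of that scaling (plain userspace C with illustrative constants, not kernel code) shows why skipping the check cannot change the nice-0 result:

	#include <stdio.h>

	#define NICE_0_LOAD 1024UL

	/* Approximates calc_delta_fair(): scale gran by NICE_0_LOAD/weight,
	 * with the same fast path that used to sit in wakeup_gran(). */
	static unsigned long scale_gran(unsigned long gran, unsigned long weight)
	{
		if (weight != NICE_0_LOAD)
			gran = gran * NICE_0_LOAD / weight;
		return gran;
	}

	int main(void)
	{
		printf("%lu\n", scale_gran(1000000UL, NICE_0_LOAD));     /* unchanged */
		printf("%lu\n", scale_gran(1000000UL, 2 * NICE_0_LOAD)); /* halved */
		return 0;
	}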
@@ -2104,21 +2101,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
              enum cpu_idle_type idle, int *all_pinned,
              int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-       int loops = 0, pulled = 0, pinned = 0;
+       int loops = 0, pulled = 0;
        long rem_load_move = max_load_move;
        struct task_struct *p, *n;
 
        if (max_load_move == 0)
                goto out;
 
-       pinned = 1;
-
        list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
                if (loops++ > sysctl_sched_nr_migrate)
                        break;
 
                if ((p->se.load.weight >> 1) > rem_load_move ||
-                   !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+                   !can_migrate_task(p, busiest, this_cpu, sd, idle,
+                                     all_pinned))
                        continue;
 
                pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2149,6 @@ out:
         */
        schedstat_add(sd, lb_gained[idle], pulled);
 
-       if (all_pinned)
-               *all_pinned = pinned;
-
        return max_load_move - rem_load_move;
 }
 
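Taken together, the two hunks above change how "every task was pinned" is reported: instead of keeping a local pinned flag and copying it out at the end, balance_tasks() now hands the caller's all_pinned pointer straight to can_migrate_task(), which clears it as soon as it sees a task that is allowed to run on the destination CPU. A standalone sketch of that protocol (illustrative names and data, not kernel code):

	#include <stdio.h>

	struct task { int allowed_on_dest; unsigned long weight; };

	/* Mimics what can_migrate_task() does with its all_pinned argument:
	 * clear the flag the moment one task could legally be migrated. */
	static int can_migrate(const struct task *t, int *all_pinned)
	{
		if (!t->allowed_on_dest)
			return 0;	/* pinned: leave the flag alone */
		*all_pinned = 0;	/* at least one task was movable */
		return 1;
	}

	int main(void)
	{
		struct task busiest[] = { { 0, 3 }, { 1, 2 }, { 0, 1 } };
		int all_pinned = 1;	/* the caller assumes "all pinned" up front */
		unsigned long moved = 0;

		for (int i = 0; i < 3; i++)
			if (can_migrate(&busiest[i], &all_pinned))
				moved += busiest[i].weight;

		printf("moved=%lu all_pinned=%d\n", moved, all_pinned);
		return 0;
	}

Initializing the flag is now the caller's job, which is exactly what the final hunk below adds to load_balance().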
@@ -3341,6 +3334,7 @@ redo:
                 * still unbalanced. ld_moved simply stays zero, so it is
                 * correctly treated as an imbalance.
                 */
+               all_pinned = 1;
                local_irq_save(flags);
                double_rq_lock(this_rq, busiest);
                ld_moved = move_tasks(this_rq, this_cpu, busiest,
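With the initialization gone from balance_tasks(), load_balance() arms the flag itself and does so on every pass through its redo loop, so a retry against a different busiest CPU starts again from "assume everything is pinned". A standalone sketch of that caller-side pattern (illustrative names and data, not kernel code):

	#include <stdio.h>

	/* Pretend runqueues: how many movable tasks each candidate CPU holds. */
	static int movable_tasks[] = { 0, 0, 3 };	/* CPUs 0 and 1 are fully pinned */

	/* Stand-in for move_tasks(): clears *all_pinned if anything could move. */
	static int move_tasks_sketch(int cpu, int *all_pinned)
	{
		if (movable_tasks[cpu] == 0)
			return 0;
		*all_pinned = 0;
		return movable_tasks[cpu];
	}

	int main(void)
	{
		int all_pinned, moved, cpu = 0;

	redo:
		all_pinned = 1;			/* re-armed before every attempt */
		moved = move_tasks_sketch(cpu, &all_pinned);
		if (all_pinned && ++cpu < 3)
			goto redo;		/* everything pinned: try the next CPU */

		printf("moved=%d all_pinned=%d\n", moved, all_pinned);
		return 0;
	}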