Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author     Linus Torvalds <torvalds@linux-foundation.org>
Thu, 24 Jul 2008 19:53:51 +0000 (12:53 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Thu, 24 Jul 2008 19:53:51 +0000 (12:53 -0700)
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: clean up compiler warning
  sched: fix hrtick & generic-ipi dependency
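
The "clean up compiler warning" commit replaces an in-place do_div() on a signed 64-bit value with div_u64() (see the do_balance_runtime() hunk below). do_div() is a macro that divides its first argument in place and expects an unsigned 64-bit lvalue, so handing it an s64 can provoke warnings with some compilers; div_u64() is an ordinary function taking the dividend by value. A minimal userspace sketch of the same pattern, with a local stand-in for the kernel helper from <linux/math64.h>:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's div_u64() (declared in
 * <linux/math64.h>): divide a u64 dividend by a u32 divisor. */
static inline uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	int64_t diff = 3000000;   /* runtime surplus; made-up value */
	uint32_t weight = 4;      /* CPUs sharing the surplus; made-up value */

	/* The fixed code casts the s64 to u64 only after checking it is
	 * positive, and assigns the quotient back instead of relying on
	 * do_div()'s in-place update. */
	if (diff > 0)
		diff = div_u64((uint64_t)diff, weight);

	printf("per-cpu share: %lld\n", (long long)diff);
	return 0;
}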

kernel/sched_rt.c

diff --combined kernel/sched_rt.c
index f85a76363eee873e23edd0983e7427be65b4fb08,93ac8ee08271fe0d2bee0ca4ca28bb0d7e06c752..908c04f9dad02d23df66fbc55a28f5619fc10081
@@@ -240,7 -240,7 +240,7 @@@ static int do_balance_runtime(struct rt
  
        spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
 -      for_each_cpu_mask(i, rd->span) {
 +      for_each_cpu_mask_nr(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;
  
  
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
-                       do_div(diff, weight);
+                       diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
@@@ -601,7 -601,11 +601,7 @@@ static void __enqueue_rt_entity(struct 
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;
  
 -      if (rt_se->nr_cpus_allowed == 1)
 -              list_add(&rt_se->run_list, queue);
 -      else
 -              list_add_tail(&rt_se->run_list, queue);
 -
 +      list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);
  
        inc_rt_tasks(rt_se, rt_rq);
@@@ -686,34 -690,32 +686,34 @@@ static void dequeue_task_rt(struct rq *
   * Put task to the end of the run list without the overhead of dequeue
   * followed by enqueue.
   */
 -static
 -void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 +static void
 +requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
  {
 -      struct rt_prio_array *array = &rt_rq->active;
 -
        if (on_rt_rq(rt_se)) {
 -              list_del_init(&rt_se->run_list);
 -              list_add_tail(&rt_se->run_list,
 -                            array->queue + rt_se_prio(rt_se));
 +              struct rt_prio_array *array = &rt_rq->active;
 +              struct list_head *queue = array->queue + rt_se_prio(rt_se);
 +
 +              if (head)
 +                      list_move(&rt_se->run_list, queue);
 +              else
 +                      list_move_tail(&rt_se->run_list, queue);
        }
  }
  
 -static void requeue_task_rt(struct rq *rq, struct task_struct *p)
 +static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
  {
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;
  
        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
 -              requeue_rt_entity(rt_rq, rt_se);
 +              requeue_rt_entity(rt_rq, rt_se, head);
        }
  }
  
  static void yield_task_rt(struct rq *rq)
  {
 -      requeue_task_rt(rq, rq->curr);
 +      requeue_task_rt(rq, rq->curr, 0);
  }
  
  #ifdef CONFIG_SMP
@@@ -753,30 -755,6 +753,30 @@@ static int select_task_rq_rt(struct tas
         */
        return task_cpu(p);
  }
 +
 +static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 +{
 +      cpumask_t mask;
 +
 +      if (rq->curr->rt.nr_cpus_allowed == 1)
 +              return;
 +
 +      if (p->rt.nr_cpus_allowed != 1
 +          && cpupri_find(&rq->rd->cpupri, p, &mask))
 +              return;
 +
 +      if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
 +              return;
 +
 +      /*
 +       * There appears to be other cpus that can accept
 +       * current and none to run 'p', so lets reschedule
 +       * to try and push current away:
 +       */
 +      requeue_task_rt(rq, p, 1);
 +      resched_task(rq->curr);
 +}
 +
  #endif /* CONFIG_SMP */
  
  /*
@@@ -802,8 -780,18 +802,8 @@@ static void check_preempt_curr_rt(struc
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
 -      if((p->prio == rq->curr->prio)
 -         && p->rt.nr_cpus_allowed == 1
 -         && rq->curr->rt.nr_cpus_allowed != 1) {
 -              cpumask_t mask;
 -
 -              if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
 -                      /*
 -                       * There appears to be other cpus that can accept
 -                       * current, so lets reschedule to try and push it away
 -                       */
 -                      resched_task(rq->curr);
 -      }
 +      if (p->prio == rq->curr->prio && !need_resched())
 +              check_preempt_equal_prio(rq, p);
  #endif
  }
  
@@@ -935,13 -923,6 +935,13 @@@ static int find_lowest_rq(struct task_s
        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */
  
 +      /*
 +       * Only consider CPUs that are usable for migration.
 +       * I guess we might want to change cpupri_find() to ignore those
 +       * in the first place.
 +       */
 +      cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
 +
        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
@@@ -1128,7 -1109,7 +1128,7 @@@ static int pull_rt_task(struct rq *this
  
        next = pick_next_task_rt(this_rq);
  
 -      for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
 +      for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;
  
@@@ -1436,7 -1417,7 +1436,7 @@@ static void task_tick_rt(struct rq *rq
         * on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
 -              requeue_task_rt(rq, p);
 +              requeue_task_rt(rq, p, 0);
                set_tsk_need_resched(p);
        }
  }
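
Most of the diff above threads a new head flag from requeue_task_rt() down to requeue_rt_entity(), so that check_preempt_equal_prio() can requeue a task at the front of its priority queue (list_move()) while yield_task_rt() and task_tick_rt() keep the old move-to-tail behaviour (list_move_tail()). A self-contained sketch of that head/tail choice, using minimal stand-ins for the kernel's <linux/list.h> primitives (only the names from the diff are real; the rest is illustration):

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for the kernel's circular doubly-linked list. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void __list_add(struct list_head *n, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = n; n->next = next; n->prev = prev; prev->next = n;
}
static void list_add(struct list_head *n, struct list_head *h)
{
	__list_add(n, h, h->next);		/* insert at head */
}
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	__list_add(n, h->prev, h);		/* insert at tail */
}
static void list_del(struct list_head *e)
{
	e->prev->next = e->next; e->next->prev = e->prev;
}
static void list_move(struct list_head *e, struct list_head *h)
{
	list_del(e); list_add(e, h);
}
static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e); list_add_tail(e, h);
}

struct task { const char *name; struct list_head run_list; };

/* Mirrors the reworked requeue_rt_entity(): head != 0 moves the entity
 * to the front of its priority queue, otherwise to the back. */
static void requeue(struct list_head *queue, struct task *t, int head)
{
	if (head)
		list_move(&t->run_list, queue);
	else
		list_move_tail(&t->run_list, queue);
}

static void print_queue(const char *tag, struct list_head *queue)
{
	printf("%s:", tag);
	for (struct list_head *p = queue->next; p != queue; p = p->next) {
		struct task *t = (struct task *)
			((char *)p - offsetof(struct task, run_list));
		printf(" %s", t->name);
	}
	printf("\n");
}

int main(void)
{
	struct list_head queue;
	struct task a = { "A" }, b = { "B" }, c = { "C" };

	INIT_LIST_HEAD(&queue);
	list_add_tail(&a.run_list, &queue);
	list_add_tail(&b.run_list, &queue);
	list_add_tail(&c.run_list, &queue);

	requeue(&queue, &b, 1);			/* preempt-equal-prio path */
	print_queue("head requeue", &queue);	/* B A C */

	requeue(&queue, &b, 0);			/* yield / tick path */
	print_queue("tail requeue", &queue);	/* A C B */
	return 0;
}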
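
The find_lowest_rq() hunk additionally ANDs the mask returned by cpupri_find() with cpu_active_map, so CPUs on their way offline are never picked as push targets. A toy illustration of that masking step, using a plain bitmask in place of the kernel's cpumask_t and cpus_and() (the mask values are invented for the demo):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* One bit per CPU; values made up for the demo. */
	uint32_t lowest_mask = 0x0f;	/* CPUs 0-3 run lower-priority work */
	uint32_t active_map  = 0x0d;	/* CPU 1 is going offline */

	/* The fix: only CPUs that are both lower-priority and active
	 * remain candidates for pushing the task. */
	lowest_mask &= active_map;

	for (int cpu = 0; cpu < 32; cpu++)
		if (lowest_mask & (1u << cpu))
			printf("candidate cpu %d\n", cpu);
	return 0;
}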