diff --git a/kernel/sched.c b/kernel/sched.c
index 2e3545f57e770830c4fca6702a64e3e2bca510a1..8be2c13b50d018cc69280829e14e31612d4deddd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
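
The comment above refers to the reciprocal-divide trick: because a group's cpu_power changes rarely, a fixed-point reciprocal can be precomputed once, turning every later division into a multiply and a shift. Below is a minimal standalone sketch of that idea, assuming a constant divisor greater than 1; the helper names echo include/linux/reciprocal_div.h, but this is an illustration, not the kernel's exact code.

    /*
     * Sketch of reciprocal divide: precompute R ~= 2^32 / B once,
     * then compute A / B as (A * R) >> 32 with no hardware divide.
     * Illustrative only; not the kernel's reciprocal_div.h verbatim.
     */
    #include <stdint.h>
    #include <stdio.h>

    /* Precompute once for a fixed divisor B > 1. */
    static uint32_t reciprocal_value(uint32_t B)
    {
            return (uint32_t)(0xffffffffULL / B) + 1;
    }

    /* Each division is now a 64-bit multiply and a shift. */
    static uint32_t reciprocal_divide(uint32_t A, uint32_t R)
    {
            return (uint32_t)(((uint64_t)A * R) >> 32);
    }

    int main(void)
    {
            uint32_t R = reciprocal_value(7);   /* done once, at setup */

            printf("%u\n", reciprocal_divide(42, R));   /* prints 6 */
            return 0;
    }
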
@@ -3728,8 +3731,13 @@ redo:
                }
 
                double_unlock_balance(this_rq, busiest);
+               /*
+                * Should not call ttwu while holding a rq->lock
+                */
+               spin_unlock(&this_rq->lock);
                if (active_balance)
                        wake_up_process(busiest->migration_thread);
+               spin_lock(&this_rq->lock);
 
        } else
                sd->nr_balance_failed = 0;
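
The new comment and unlock/lock pair encode a lock-ordering rule: wake_up_process() ends up in try_to_wake_up() (the "ttwu" of the comment), which takes the runqueue lock of the task being woken, so calling it while still holding this_rq->lock risks an ABBA deadlock against another CPU acquiring the same two rq locks in the opposite order. Here is a userspace analogue of the pattern, with pthread mutexes standing in for rq locks; all names are invented for illustration.

    /*
     * A thread must not call into code that takes lock B while holding
     * lock A if others may take B before A; drop A around the call.
     */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t this_lock = PTHREAD_MUTEX_INITIALIZER;   /* ~this_rq->lock */
    static pthread_mutex_t target_lock = PTHREAD_MUTEX_INITIALIZER; /* ~woken task's rq lock */

    /* Stand-in for wake_up_process()/ttwu: takes the target's lock. */
    static void wake_target(void)
    {
            pthread_mutex_lock(&target_lock);
            /* ... mark the target runnable ... */
            pthread_mutex_unlock(&target_lock);
    }

    static void balance_step(void)
    {
            pthread_mutex_lock(&this_lock);
            /* ... balancing work that genuinely needs this_lock ... */

            /* Drop our lock around the call that takes the other lock. */
            pthread_mutex_unlock(&this_lock);
            wake_target();
            pthread_mutex_lock(&this_lock);

            /* State read before the unlock may be stale; re-check it. */
            pthread_mutex_unlock(&this_lock);
    }

    int main(void)
    {
            balance_step();
            puts("no deadlock");
            return 0;
    }

The price of the pattern is visible in the hunk itself: once this_rq->lock has been dropped and re-taken, anything computed under the lock beforehand may be stale, so such windows are kept as short as this one.
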
@@ -7277,10 +7285,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
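
This hunk and the three below it make one substitution: the per-CPU NUMA domains become static_sched_domain wrappers, and every user now takes the embedded .sd member. The reason is storage: struct sched_domain ends in a flexible CPU-span array, which occupies no space in a static definition, so a wrapper has to reserve worst-case bitmap room directly behind it (in the kernel of this era the wrapper pairs the struct with a DECLARE_BITMAP(span, CONFIG_NR_CPUS) tail). A standalone GNU-C sketch of the same trick follows; struct static_domain and MAX_CPUS are invented for illustration.

    /*
     * A struct with a zero-length tail array (GNU extension, as the
     * kernel uses for the CPU span) has no storage for that array when
     * defined statically; a wrapper reserves it right behind the struct.
     */
    #include <stdio.h>

    #define MAX_CPUS 64

    struct domain {
            int flags;
            unsigned long span[0];  /* zero-length tail, GNU extension */
    };

    /* Reserve span storage immediately after the embedded struct. */
    struct static_domain {
            struct domain d;
            unsigned long span_storage[MAX_CPUS / (8 * sizeof(unsigned long))];
    };

    static struct static_domain node_domain;    /* static, like DEFINE_PER_CPU */

    int main(void)
    {
            struct domain *d = &node_domain.d;  /* the .d step, like .sd */

            d->span[0] = 1UL;                   /* lands in span_storage[0] */
            printf("span word 0 = %lu\n", d->span[0]);
            return 0;
    }

The .sd accesses in the remaining hunks are exactly the embedded-member step shown in main() above.
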
@@ -7555,7 +7563,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
                if (cpumask_weight(cpu_map) >
                                SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-                       sd = &per_cpu(allnodes_domains, i);
+                       sd = &per_cpu(allnodes_domains, i).sd;
                        SD_INIT(sd, ALLNODES);
                        set_domain_attribute(sd, attr);
                        cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7565,7 +7573,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                } else
                        p = NULL;
 
-               sd = &per_cpu(node_domains, i);
+               sd = &per_cpu(node_domains, i).sd;
                SD_INIT(sd, NODE);
                set_domain_attribute(sd, attr);
                sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7683,7 +7691,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                for_each_cpu(j, nodemask) {
                        struct sched_domain *sd;
 
-                       sd = &per_cpu(node_domains, j);
+                       sd = &per_cpu(node_domains, j).sd;
                        sd->groups = sg;
                }
                sg->__cpu_power = 0;