Merge commit 'v2.6.28-rc7' into tracing/core
diff --git a/kernel/sched.c b/kernel/sched.c
index 700aa9a1413fc783028170bcfebc014b17a38120..7729c4bbc8baec10e47529142b788c43b71fa501 100644
  */
 #define RUNTIME_INF    ((u64)~0ULL)
 
+DEFINE_TRACE(sched_wait_task);
+DEFINE_TRACE(sched_wakeup);
+DEFINE_TRACE(sched_wakeup_new);
+DEFINE_TRACE(sched_switch);
+DEFINE_TRACE(sched_migrate_task);
+
 #ifdef CONFIG_SMP
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
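
Note on the DEFINE_TRACE() hunk above: with the 2.6.28-era tracepoint
rework, a tracepoint declared with DECLARE_TRACE() in <trace/sched.h>
must be instantiated exactly once with DEFINE_TRACE() in a C file, and
sched.c is the natural home for the scheduler events. A probe can then
attach to one of these points at runtime; the sketch below is
illustrative only and assumes this kernel's sched_switch prototype
(rq, prev, next):

    #include <linux/module.h>
    #include <trace/sched.h>

    /* the probe signature must match the DECLARE_TRACE() prototype */
    static void probe_sched_switch(struct rq *rq,
                                   struct task_struct *prev,
                                   struct task_struct *next)
    {
            /* runs on every context switch while registered */
    }

    static int __init sched_probe_init(void)
    {
            /* register_trace_sched_switch() is generated by DECLARE_TRACE() */
            return register_trace_sched_switch(probe_sched_switch);
    }

    static void __exit sched_probe_exit(void)
    {
            unregister_trace_sched_switch(probe_sched_switch);
    }

    module_init(sched_probe_init);
    module_exit(sched_probe_exit);
    MODULE_LICENSE("GPL");
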
@@ -1453,7 +1459,7 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       unsigned long nr_running = rq->nr_running;
+       unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
 
        if (nr_running)
                rq->avg_load_per_task = rq->load.weight / nr_running;
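
Note on the ACCESS_ONCE() hunk above: cpu_avg_load_per_task() may be
called for a remote CPU with no runqueue lock held, so rq->nr_running
can change under us. With a plain load the compiler is free to drop the
local copy and re-read rq->nr_running at each use, letting the zero
test and the division see different values and divide by zero.
ACCESS_ONCE() forces a single read through a volatile pointer; its
definition in <linux/compiler.h> is:

    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

With the patch, both the test and the divide operate on the one value
actually loaded into nr_running.
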
@@ -5896,6 +5902,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         * The idle tasks have their own, simple scheduling class:
         */
        idle->sched_class = &idle_sched_class;
+       ftrace_graph_init_task(idle);
 }
 
 /*
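
Note on the init_idle() hunk above: the function graph tracer gives
each task a shadow return stack (t->ret_stack) used to hook function
returns. It is normally allocated in the fork path, but idle tasks are
hand-built by init_idle() rather than forked, so they need this
explicit call or the tracer would push return addresses through an
uninitialized stack. As a rough sketch of the 2.6.28-era helper in
kernel/trace/ftrace.c (details may differ slightly):

    void ftrace_graph_init_task(struct task_struct *t)
    {
            if (atomic_read(&ftrace_graph_active)) {
                    /* tracer already running: allocate the shadow stack */
                    t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                           * sizeof(struct ftrace_ret_stack),
                                           GFP_KERNEL);
                    if (!t->ret_stack)
                            return;
                    t->curr_ret_stack = -1;
                    atomic_set(&t->trace_overrun, 0);
            } else
                    t->ret_stack = NULL;
    }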