ftrace: fix wakeup callback
diff --git a/kernel/sched.c b/kernel/sched.c
index 58fb8af157762ace482952419c724f5dc23063a1..1ec3fb2efee6d6bcccb72cc292a59e0b82ba959c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -70,6 +70,7 @@
 #include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
+#include <linux/ftrace.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -641,6 +642,24 @@ static inline void update_rq_clock(struct rq *rq)
 # define const_debug static const
 #endif
 
+/**
+ * runqueue_is_locked
+ *
+ * Returns true if the current cpu runqueue is locked.
+ * This interface allows printk to be called with the runqueue lock
+ * held and know whether or not it is OK to wake up the klogd.
+ */
+int runqueue_is_locked(void)
+{
+       int cpu = get_cpu();
+       struct rq *rq = cpu_rq(cpu);
+       int ret;
+
+       ret = spin_is_locked(&rq->lock);
+       put_cpu();
+       return ret;
+}
+
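
runqueue_is_locked() exists so printk() can test the lock before risking a
recursive wakeup. A minimal sketch of the intended caller pattern
(wake_up_klogd() is the real printk symbol; the surrounding logic is
illustrative only, not part of this patch):

    /*
     * Waking klogd takes rq->lock; if this CPU already holds it,
     * defer the wakeup instead of deadlocking.
     */
    if (!runqueue_is_locked())
        wake_up_klogd();
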
 /*
  * Debugging: various feature bits
  */
@@ -2393,6 +2412,53 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+
+void ftrace_task(struct task_struct *p, void *__tr, void *__data)
+{
+#if 0
+       /*
+        * trace timeline tree
+        */
+       __trace_special(__tr, __data,
+                       p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+#else
+       /*
+        * trace balance metrics
+        */
+       __trace_special(__tr, __data,
+                       p->pid, p->se.avg_overlap, 0);
+#endif
+}
+
+void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
+{
+       struct task_struct *p;
+       struct sched_entity *se;
+       struct rb_node *curr;
+       struct rq *rq = __rq;
+
+       if (rq->cfs.curr) {
+               p = task_of(rq->cfs.curr);
+               ftrace_task(p, __tr, __data);
+       }
+       if (rq->cfs.next) {
+               p = task_of(rq->cfs.next);
+               ftrace_task(p, __tr, __data);
+       }
+
+       for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
+               se = rb_entry(curr, struct sched_entity, run_node);
+               if (!entity_is_task(se))
+                       continue;
+
+               p = task_of(se);
+               ftrace_task(p, __tr, __data);
+       }
+}
+
+#endif
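
ftrace_all_fair_tasks() walks the CFS runqueue's red-black tree in vruntime
order: first_fair() hands back the cached leftmost node and rb_next() steps
through the rest. The same in-order walk over any kernel rbtree looks like
this (sketch; struct my_node is a hypothetical example type, not from this
patch):

    #include <linux/rbtree.h>

    struct my_node {
        struct rb_node  run_node;
        int             key;
    };

    static void walk_all(struct rb_root *root)
    {
        struct rb_node *curr;

        /* rb_first() is the leftmost (smallest-key) node;
         * rb_next() yields nodes in ascending key order. */
        for (curr = rb_first(root); curr; curr = rb_next(curr)) {
            struct my_node *n = rb_entry(curr, struct my_node, run_node);
            /* visit n ... */
        }
    }
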
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2481,6 +2547,7 @@ out_activate:
        success = 1;
 
 out_running:
+       ftrace_wake_up_task(rq, p, rq->curr);
        check_preempt_curr(rq, p);
 
        p->state = TASK_RUNNING;
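
Both wakeup sites now pass the runqueue along with the wakee and the task
currently on the CPU, which is what a wakeup-latency tracer needs to compare
priorities at the point of wakeup. The hook's shape, as implied by the call
sites (a sketch: the parameter names and the void * type are assumptions;
only the rq/p/curr argument order is visible in this patch):

    void ftrace_wake_up_task(void *rq, struct task_struct *wakee,
                             struct task_struct *curr)
    {
        /* A wakeup tracer would typically time-stamp the wakee and
         * compare wakee->prio with curr->prio to decide whether this
         * wakeup should lead to preemption. */
    }
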
@@ -2611,6 +2678,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                p->sched_class->task_new(rq, p);
                inc_nr_running(rq);
        }
+       ftrace_wake_up_task(rq, p, rq->curr);
        check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
@@ -2783,6 +2851,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
+       ftrace_ctx_switch(rq, prev, next);
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
@@ -4362,26 +4431,44 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+                               defined(CONFIG_PREEMPT_TRACER))
+
+static inline unsigned long get_parent_ip(unsigned long addr)
+{
+       if (in_lock_functions(addr)) {
+               addr = CALLER_ADDR2;
+               if (in_lock_functions(addr))
+                       addr = CALLER_ADDR3;
+       }
+       return addr;
+}
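
get_parent_ip() skips past the spinlock wrappers so a preempt-off event is
attributed to the real caller rather than to the lock functions themselves.
The CALLER_ADDRn values come from <linux/ftrace.h>; with frame pointers
enabled they reduce to the GCC builtin, approximately:

    /* Approximate definitions, valid only with CONFIG_FRAME_POINTER: */
    #define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
    #define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
    #define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
    #define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
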
 
 void __kprobes add_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Underflow?
         */
        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
                return;
+#endif
        preempt_count() += val;
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Spinlock count overflowing soon?
         */
        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
                                PREEMPT_MASK - 10);
+#endif
+       if (preempt_count() == val)
+               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 EXPORT_SYMBOL(add_preempt_count);
 
 void __kprobes sub_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Underflow?
         */
@@ -4393,7 +4480,10 @@ void __kprobes sub_preempt_count(int val)
        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
                        !(preempt_count() & PREEMPT_MASK)))
                return;
+#endif
 
+       if (preempt_count() == val)
+               trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
        preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
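
The preempt_count() == val test makes both hooks fire only at the outermost
level: trace_preempt_off() when the count leaves zero, trace_preempt_on()
just before it returns to zero. A latency tracer pairs them roughly like
this (sketch with hypothetical per-CPU bookkeeping; not the tracer's real
implementation):

    /* Assumes <linux/percpu.h>; preemption is off in both hooks. */
    static DEFINE_PER_CPU(u64, preempt_off_ts);

    void trace_preempt_off(unsigned long ip, unsigned long parent_ip)
    {
        __get_cpu_var(preempt_off_ts) = sched_clock();
    }

    void trace_preempt_on(unsigned long ip, unsigned long parent_ip)
    {
        u64 delta = sched_clock() - __get_cpu_var(preempt_off_ts);
        /* record delta against ip/parent_ip as the max-latency
         * candidate for this CPU ... */
    }
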
@@ -4567,8 +4657,6 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
        struct thread_info *ti = current_thread_info();
-       struct task_struct *task = current;
-       int saved_lock_depth;
 
        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
@@ -4579,16 +4667,7 @@ asmlinkage void __sched preempt_schedule(void)
 
        do {
                add_preempt_count(PREEMPT_ACTIVE);
-
-               /*
-                * We keep the big kernel semaphore locked, but we
-                * clear ->lock_depth so that schedule() doesnt
-                * auto-release the semaphore:
-                */
-               saved_lock_depth = task->lock_depth;
-               task->lock_depth = -1;
                schedule();
-               task->lock_depth = saved_lock_depth;
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -4609,26 +4688,15 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
        struct thread_info *ti = current_thread_info();
-       struct task_struct *task = current;
-       int saved_lock_depth;
 
        /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());
 
        do {
                add_preempt_count(PREEMPT_ACTIVE);
-
-               /*
-                * We keep the big kernel semaphore locked, but we
-                * clear ->lock_depth so that schedule() doesnt
-                * auto-release the semaphore:
-                */
-               saved_lock_depth = task->lock_depth;
-               task->lock_depth = -1;
                local_irq_enable();
                schedule();
                local_irq_disable();
-               task->lock_depth = saved_lock_depth;
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -5547,7 +5615,6 @@ static void __cond_resched(void)
        } while (need_resched());
 }
 
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 int __sched _cond_resched(void)
 {
        if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
@@ -5558,7 +5625,6 @@ int __sched _cond_resched(void)
        return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
-#endif
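
Dropping the #ifdef builds _cond_resched() unconditionally; under
CONFIG_PREEMPT the need_resched() test simply never triggers from here, so
the only cost is the call. The usual caller pattern in long kernel loops
(illustrative fragment only):

    for (i = 0; i < nr_items; i++) {
        process_item(i);    /* hypothetical work function */
        cond_resched();     /* voluntary reschedule point */
    }
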
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
@@ -5750,7 +5816,7 @@ out_unlock:
        return retval;
 }
 
-static const char stat_nam[] = "RSDTtZX";
+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
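
TASK_STATE_TO_CHAR_STR moves the state-letter table into sched.h; at this
point it expands to the same "RSDTtZX" string that was open-coded here
before. sched_show_task() indexes it by the lowest set state bit, roughly:

    /* Illustrative fragment, as used by sched_show_task(): */
    unsigned state = p->state ? __ffs(p->state) + 1 : 0;
    printk(KERN_INFO "%-13.13s %c", p->comm,
           state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
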
 
 void sched_show_task(struct task_struct *p)
 {
@@ -5853,8 +5919,11 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
+#if defined(CONFIG_PREEMPT)
+       task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
+#else
        task_thread_info(idle)->preempt_count = 0;
-
+#endif
        /*
         * The idle tasks have their own, simple scheduling class:
         */
@@ -9007,7 +9076,7 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
                                s64 val)
 {
        return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);