Merge tag 'kvm-3.9-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 26058d0bebba1ca927c4df3e993190d5415f006c..f1bdecf09afb593560f01309b791b5dcb1ed45d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -83,7 +83,7 @@
 #endif
 
 #include "sched.h"
-#include "../workqueue_sched.h"
+#include "../workqueue_internal.h"
 #include "../smpboot.h"
 
 #define CREATE_TRACE_POINTS
@@ -1132,18 +1132,28 @@ EXPORT_SYMBOL_GPL(kick_process);
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
-       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+       int nid = cpu_to_node(cpu);
+       const struct cpumask *nodemask = NULL;
        enum { cpuset, possible, fail } state = cpuset;
        int dest_cpu;
 
-       /* Look for allowed, online CPU in same node. */
-       for_each_cpu(dest_cpu, nodemask) {
-               if (!cpu_online(dest_cpu))
-                       continue;
-               if (!cpu_active(dest_cpu))
-                       continue;
-               if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
-                       return dest_cpu;
+       /*
+        * If the node that the cpu is on has been offlined, cpu_to_node()
+        * will return -1. In that case there are no CPUs on the node, so
+        * we should select a CPU on another node.
+        */
+       if (nid != -1) {
+               nodemask = cpumask_of_node(nid);
+
+               /* Look for allowed, online CPU in same node. */
+               for_each_cpu(dest_cpu, nodemask) {
+                       if (!cpu_online(dest_cpu))
+                               continue;
+                       if (!cpu_active(dest_cpu))
+                               continue;
+                       if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+                               return dest_cpu;
+               }
        }
 
        for (;;) {
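
The hunk above guards select_fallback_rq() against cpu_to_node() returning -1
for a CPU whose node has been offlined; indexing cpumask_of_node() with -1 is
not safe. A minimal sketch of the same guard pattern as a hypothetical
standalone helper (pick_nearby_cpu() and its -1 fallback are illustrative
only, not kernel API):

        #include <linux/cpumask.h>
        #include <linux/topology.h>

        /* Hypothetical: find an allowed, active CPU on @cpu's node. */
        static int pick_nearby_cpu(int cpu, const struct cpumask *allowed)
        {
                int nid = cpu_to_node(cpu);
                int dest;

                /* An offlined node makes cpu_to_node() return -1. */
                if (nid != -1) {
                        for_each_cpu_and(dest, cpumask_of_node(nid), allowed)
                                if (cpu_active(dest))
                                        return dest;
                }
                return -1;      /* caller falls back to other nodes */
        }
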
@@ -4364,20 +4374,32 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns true if we indeed boosted the target task.
+ * Returns:
+ *     true (>0) if we indeed boosted the target task.
+ *     false (0) if we failed to boost the target.
+ *     -ESRCH if there's no task to yield to.
  */
-bool __sched yield_to(struct task_struct *p, bool preempt)
+int __sched yield_to(struct task_struct *p, bool preempt)
 {
        struct task_struct *curr = current;
        struct rq *rq, *p_rq;
        unsigned long flags;
-       bool yielded = 0;
+       int yielded = 0;
 
        local_irq_save(flags);
        rq = this_rq();
 
 again:
        p_rq = task_rq(p);
+       /*
+        * If we're the only runnable task on the rq and target rq also
+        * has only one task, there's absolutely no point in yielding.
+        */
+       if (rq->nr_running == 1 && p_rq->nr_running == 1) {
+               yielded = -ESRCH;
+               goto out_irq;
+       }
+
        double_rq_lock(rq, p_rq);
        while (task_rq(p) != p_rq) {
                double_rq_unlock(rq, p_rq);
@@ -4385,13 +4407,13 @@ again:
        }
 
        if (!curr->sched_class->yield_to_task)
-               goto out;
+               goto out_unlock;
 
        if (curr->sched_class != p->sched_class)
-               goto out;
+               goto out_unlock;
 
        if (task_running(p_rq, p) || p->state)
-               goto out;
+               goto out_unlock;
 
        yielded = curr->sched_class->yield_to_task(rq, p, preempt);
        if (yielded) {
@@ -4404,11 +4426,12 @@ again:
                        resched_task(p_rq->curr);
        }
 
-out:
+out_unlock:
        double_rq_unlock(rq, p_rq);
+out_irq:
        local_irq_restore(flags);
 
-       if (yielded)
+       if (yielded > 0)
                schedule();
 
        return yielded;
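
With these hunks, yield_to() reports three outcomes instead of two: a
positive value when the target was boosted, 0 when boosting failed, and
-ESRCH when both the source and target runqueues hold a single task, so
there is nobody to yield to. A busy-spinning caller (for example a PLE
handler scanning vcpu tasks) can use -ESRCH to stop scanning early. An
illustrative consumption loop; for_each_candidate() is hypothetical:

        struct task_struct *p;

        for_each_candidate(p) {
                int ret = yield_to(p, false);

                if (ret > 0)
                        break;  /* boosted a target; we already rescheduled */
                if (ret == -ESRCH)
                        break;  /* single-task rqs: yielding is pointless */
                /* ret == 0: target unsuitable, try the next candidate */
        }
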
@@ -4667,6 +4690,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
+       vtime_init_idle(idle);
 #if defined(CONFIG_SMP)
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
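
init_idle() now seeds vtime accounting for the idle task: under full-dynticks
cputime accounting, the per-task vtime snapshot must be stamped before the
idle loop starts, or the first accounting delta would be computed from an
uninitialized base. A rough sketch of the shape of such an init; this is an
assumption, since vtime_init_idle() itself is not shown in this diff:

        /* Assumed shape only: start the idle task's vtime clock from "now". */
        t->vtime_snap = sched_clock();          /* baseline timestamp */
        t->vtime_snap_whence = VTIME_SYS;       /* idle runs in system context */
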
@@ -7160,7 +7184,6 @@ static void free_sched_group(struct task_group *tg)
 struct task_group *sched_create_group(struct task_group *parent)
 {
        struct task_group *tg;
-       unsigned long flags;
 
        tg = kzalloc(sizeof(*tg), GFP_KERNEL);
        if (!tg)
@@ -7172,6 +7195,17 @@ struct task_group *sched_create_group(struct task_group *parent)
        if (!alloc_rt_sched_group(tg, parent))
                goto err;
 
+       return tg;
+
+err:
+       free_sched_group(tg);
+       return ERR_PTR(-ENOMEM);
+}
+
+void sched_online_group(struct task_group *tg, struct task_group *parent)
+{
+       unsigned long flags;
+
        spin_lock_irqsave(&task_group_lock, flags);
        list_add_rcu(&tg->list, &task_groups);
 
@@ -7181,12 +7215,6 @@ struct task_group *sched_create_group(struct task_group *parent)
        INIT_LIST_HEAD(&tg->children);
        list_add_rcu(&tg->siblings, &parent->children);
        spin_unlock_irqrestore(&task_group_lock, flags);
-
-       return tg;
-
-err:
-       free_sched_group(tg);
-       return ERR_PTR(-ENOMEM);
 }
 
 /* rcu callback to free various structures associated with a task group */
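
sched_create_group() is split in two here: allocation stays in
sched_create_group(), while insertion on the global task_groups list and
linkage to the parent move to the new sched_online_group(). Any caller that
previously relied on sched_create_group() alone must now make both calls, as
in this sketch (error handling abbreviated):

        struct task_group *tg;

        tg = sched_create_group(parent);        /* allocate rq/se state */
        if (IS_ERR(tg))
                return PTR_ERR(tg);
        sched_online_group(tg, parent);         /* publish on task_groups */
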
@@ -7198,6 +7226,12 @@ static void free_sched_group_rcu(struct rcu_head *rhp)
 
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
+{
+       /* wait for possible concurrent references to cfs_rqs to complete */
+       call_rcu(&tg->rcu, free_sched_group_rcu);
+}
+
+void sched_offline_group(struct task_group *tg)
 {
        unsigned long flags;
        int i;
@@ -7210,9 +7244,6 @@ void sched_destroy_group(struct task_group *tg)
        list_del_rcu(&tg->list);
        list_del_rcu(&tg->siblings);
        spin_unlock_irqrestore(&task_group_lock, flags);
-
-       /* wait for possible concurrent references to cfs_rqs complete */
-       call_rcu(&tg->rcu, free_sched_group_rcu);
 }
 
 /* change task's runqueue when it moves between groups.
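
Teardown is split symmetrically: sched_offline_group() unlinks the group from
the task_groups list (and, per the removed lines above, unregisters its
cfs_rqs), while sched_destroy_group() only queues the RCU callback that frees
the structures once concurrent readers are done. The expected order mirrors
creation (sketch):

        sched_offline_group(tg);        /* unlink; no new references */
        sched_destroy_group(tg);        /* free after an RCU grace period */
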
@@ -7508,6 +7539,25 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+int sched_rr_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret;
+       static DEFINE_MUTEX(mutex);
+
+       mutex_lock(&mutex);
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       /* make sure that internally we keep jiffies */
+       /* also, writing zero resets timeslice to default */
+       if (!ret && write) {
+               sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+                       RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+       }
+       mutex_unlock(&mutex);
+       return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
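
sched_rr_handler() backs a new sysctl controlling the SCHED_RR timeslice:
values are written in milliseconds and converted to jiffies internally, and
writing a value <= 0 restores the RR_TIMESLICE default. Assuming the knob is
exposed as kernel.sched_rr_timeslice_ms (the sysctl table entry is not part
of this hunk), usage would look like:

        # Set a 10 ms SCHED_RR timeslice:
        echo 10 > /proc/sys/kernel/sched_rr_timeslice_ms
        # Write zero to restore the built-in RR_TIMESLICE default:
        echo 0 > /proc/sys/kernel/sched_rr_timeslice_ms
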
@@ -7564,6 +7614,19 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
        return &tg->css;
 }
 
+static int cpu_cgroup_css_online(struct cgroup *cgrp)
+{
+       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *parent;
+
+       if (!cgrp->parent)
+               return 0;
+
+       parent = cgroup_tg(cgrp->parent);
+       sched_online_group(tg, parent);
+       return 0;
+}
+
 static void cpu_cgroup_css_free(struct cgroup *cgrp)
 {
        struct task_group *tg = cgroup_tg(cgrp);
@@ -7571,6 +7634,13 @@ static void cpu_cgroup_css_free(struct cgroup *cgrp)
        sched_destroy_group(tg);
 }
 
+static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+{
+       struct task_group *tg = cgroup_tg(cgrp);
+
+       sched_offline_group(tg);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                 struct cgroup_taskset *tset)
 {
@@ -7926,6 +7996,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
        .name           = "cpu",
        .css_alloc      = cpu_cgroup_css_alloc,
        .css_free       = cpu_cgroup_css_free,
+       .css_online     = cpu_cgroup_css_online,
+       .css_offline    = cpu_cgroup_css_offline,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
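
With css_online and css_offline wired up, the cpu controller's task-group
lifecycle maps onto four cgroup callbacks, and a group is visible on the
global task_groups list only between online and offline. The mapping
established by this diff:

        css_alloc    ->  sched_create_group()    (allocate; not yet visible)
        css_online   ->  sched_online_group()    (link into task_groups list)
        css_offline  ->  sched_offline_group()   (unlink; stop new references)
        css_free     ->  sched_destroy_group()   (RCU-deferred free)
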