taint: add explicit flag to show whether lockdep is still OK.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0533496b6228cfde3afe20051006683da12e0ca5..662f3d5121837436f77be9c567ea9271f1067ae1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -193,23 +193,10 @@ static void sched_feat_disable(int i) { };
 static void sched_feat_enable(int i) { };
 #endif /* HAVE_JUMP_LABEL */
 
-static ssize_t
-sched_feat_write(struct file *filp, const char __user *ubuf,
-               size_t cnt, loff_t *ppos)
+static int sched_feat_set(char *cmp)
 {
-       char buf[64];
-       char *cmp;
-       int neg = 0;
        int i;
-
-       if (cnt > 63)
-               cnt = 63;
-
-       if (copy_from_user(&buf, ubuf, cnt))
-               return -EFAULT;
-
-       buf[cnt] = 0;
-       cmp = strstrip(buf);
+       int neg = 0;
 
        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
@@ -229,6 +216,27 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
                }
        }
 
+       return i;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+               size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       char *cmp;
+       int i;
+
+       if (cnt > 63)
+               cnt = 63;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+       cmp = strstrip(buf);
+
+       i = sched_feat_set(cmp);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;
 
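The hunk above only factors the "NO_" prefix handling out of sched_feat_write() into sched_feat_set(), so that kernel code (and not just the debugfs writer) can flip a scheduler feature by name. A minimal userspace sketch of that parsing pattern follows; feat_names, feat_mask and feat_set are illustrative stand-ins playing roughly the role of sched_feat_names[] and the feature bitmask, not the kernel symbols themselves:

#include <stdio.h>
#include <string.h>

/* Illustrative feature table; stands in for the kernel's name list. */
static const char *feat_names[] = { "GENTLE_FAIR_SLEEPERS", "START_DEBIT", "NUMA" };
#define NR_FEATS (sizeof(feat_names) / sizeof(feat_names[0]))

static unsigned int feat_mask = (1u << NR_FEATS) - 1;  /* all features enabled */

/* Returns the matched index, or NR_FEATS if the name is unknown. */
static size_t feat_set(char *cmp)
{
        size_t i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {      /* "NO_FOO" clears FOO */
                neg = 1;
                cmp += 3;
        }

        for (i = 0; i < NR_FEATS; i++) {
                if (strcmp(cmp, feat_names[i]) == 0) {
                        if (neg)
                                feat_mask &= ~(1u << i);
                        else
                                feat_mask |= 1u << i;
                        break;
                }
        }
        return i;
}

int main(void)
{
        char cmd[] = "NO_NUMA";

        if (feat_set(cmd) == NR_FEATS)
                fprintf(stderr, "unknown feature\n");
        printf("mask after '%s': %#x\n", cmd, feat_mask);
        return 0;
}

Returning the table size as the "not found" sentinel is what lets the debugfs writer above turn an unknown name into -EINVAL.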
@@ -1560,7 +1568,40 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
+
+#ifdef CONFIG_NUMA_BALANCING
+       if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
+               p->mm->numa_next_scan = jiffies;
+               p->mm->numa_next_reset = jiffies;
+               p->mm->numa_scan_seq = 0;
+       }
+
+       p->node_stamp = 0ULL;
+       p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
+       p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
+       p->numa_scan_period = sysctl_numa_balancing_scan_delay;
+       p->numa_work.next = &p->numa_work;
+#endif /* CONFIG_NUMA_BALANCING */
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#ifdef CONFIG_SCHED_DEBUG
+void set_numabalancing_state(bool enabled)
+{
+       if (enabled)
+               sched_feat_set("NUMA");
+       else
+               sched_feat_set("NO_NUMA");
 }
+#else
+__read_mostly bool numabalancing_enabled;
+
+void set_numabalancing_state(bool enabled)
+{
+       numabalancing_enabled = enabled;
+}
+#endif /* CONFIG_SCHED_DEBUG */
+#endif /* CONFIG_NUMA_BALANCING */
 
 /*
  * fork()/clone()-time setup:
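set_numabalancing_state() above is built in one of two flavours: with CONFIG_SCHED_DEBUG the NUMA toggle is routed through the sched_feat string interface, otherwise it simply flips a __read_mostly boolean that the scheduler can test cheaply. A rough userspace model of that #ifdef selection follows; the SCHED_DEBUG macro and the feat_set() helper are illustrative, not the kernel's config symbol or API:

#include <stdbool.h>
#include <stdio.h>

/* Toggle this define to model building with/without CONFIG_SCHED_DEBUG. */
#define SCHED_DEBUG 1

#ifdef SCHED_DEBUG
/* Debug build: route the toggle through the feature-string interface. */
static void feat_set(const char *cmp)
{
        printf("feature toggle: %s\n", cmp);
}

void set_numabalancing_state(bool enabled)
{
        if (enabled)
                feat_set("NUMA");
        else
                feat_set("NO_NUMA");
}
#else
/* Non-debug build: just flip a global flag tested on the hot path. */
bool numabalancing_enabled;

void set_numabalancing_state(bool enabled)
{
        numabalancing_enabled = enabled;
}
#endif

int main(void)
{
        set_numabalancing_state(true);
        return 0;
}

Either way, callers see a single set_numabalancing_state() entry point and stay ignorant of the debug/non-debug split.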
@@ -2744,7 +2785,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
        if (irqs_disabled())
                print_irqtrace_events(prev);
        dump_stack();
-       add_taint(TAINT_WARN);
+       add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
 
 /*
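The __schedule_bug() hunk above is the change named in the subject line: add_taint() now takes a second argument stating whether lockdep's state can still be trusted. A scheduling-while-atomic warning does not corrupt lock state, so it passes LOCKDEP_STILL_OK; callers that do wreck lock state pass LOCKDEP_NOW_UNRELIABLE, which switches lock debugging off. A toy userspace model of that calling convention follows; taint_mask and lockdep_trustworthy are illustrative, and only the enum and flag names mirror the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the shape of the kernel interface; storage here is illustrative. */
enum lockdep_ok { LOCKDEP_STILL_OK, LOCKDEP_NOW_UNRELIABLE };

#define TAINT_WARN 9                    /* taint bit position */

static unsigned long taint_mask;
static bool lockdep_trustworthy = true;

/* Record a taint bit and, only if asked, stop trusting lock-dependency checks. */
static void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
        if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && lockdep_trustworthy) {
                lockdep_trustworthy = false;
                printf("disabling lock debugging due to taint\n");
        }
        taint_mask |= 1UL << flag;
}

int main(void)
{
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);        /* warning: lockdep stays valid */
        printf("taint mask %#lx, lockdep %s\n", taint_mask,
               lockdep_trustworthy ? "still trusted" : "off");
        return 0;
}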
@@ -4056,8 +4097,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
                goto out_free_cpus_allowed;
        }
        retval = -EPERM;
-       if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
-               goto out_unlock;
+       if (!check_same_owner(p)) {
+               rcu_read_lock();
+               if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
+                       rcu_read_unlock();
+                       goto out_unlock;
+               }
+               rcu_read_unlock();
+       }
 
        retval = security_task_setscheduler(p);
        if (retval)
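Finally, the sched_setaffinity() hunk replaces the task_user_ns() shortcut with a capability check against the target's credentials. Those credentials are RCU-protected, so the test is wrapped in rcu_read_lock()/rcu_read_unlock(), and the lock is dropped on both the failure and the success path. A rough userspace sketch of that lock-scoping discipline follows, with a pthread rwlock standing in for the RCU read side and a toy cred/task layout; none of these types or helpers are the kernel's:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cred { bool cap_sys_nice; };
struct task { unsigned uid; struct cred cred; };

/* Stand-in for the RCU read-side critical section around credential access. */
static pthread_rwlock_t cred_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool check_same_owner(const struct task *p, unsigned caller_uid)
{
        return p->uid == caller_uid;
}

/* Returns 0 on success, -1 (EPERM-style) if the caller may not touch p. */
static int may_set_affinity(const struct task *p, unsigned caller_uid)
{
        if (!check_same_owner(p, caller_uid)) {
                /* Slow path only: take the read lock around the cred access. */
                pthread_rwlock_rdlock(&cred_lock);
                if (!p->cred.cap_sys_nice) {
                        pthread_rwlock_unlock(&cred_lock);      /* unlock on failure */
                        return -1;
                }
                pthread_rwlock_unlock(&cred_lock);              /* ... and on success */
        }
        return 0;
}

int main(void)
{
        struct task p = { .uid = 1000, .cred = { .cap_sys_nice = false } };

        printf("same owner: %d\n", may_set_affinity(&p, 1000)); /* allowed */
        printf("other user: %d\n", may_set_affinity(&p, 1001)); /* denied  */
        return 0;
}

Keeping the cheap same-owner check outside the locked region mirrors the kernel code above: the common case never enters the credential critical section at all.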