sched/deadline: Ensure that updates to exclusive cpusets don't break AC
author Juri Lelli <juri.lelli@arm.com>
Tue, 7 Oct 2014 08:52:11 +0000 (09:52 +0100)
committer Ingo Molnar <mingo@kernel.org>
Tue, 28 Oct 2014 09:48:00 +0000 (10:48 +0100)
How we deal with updates to exclusive cpusets is currently broken.
As an example, suppose we have an exclusive cpuset composed of
two CPUs: A[cpu0,cpu1]. We can assign SCHED_DEADLINE tasks to it
up to the allowed bandwidth. If we now want to modify cpusetA's
cpumask, we have to check that removing a CPU's worth of
bandwidth doesn't break admission control (AC) guarantees. The
current code does not perform this check.

This patch fixes the problem above, denying an update if the
new cpumask won't have enough bandwidth for SCHED_DEADLINE tasks
that are currently active.
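
For illustration, assuming the default per-CPU deadline bandwidth
limit of 0.95 (sched_rt_runtime_us/sched_rt_period_us =
950000/1000000) and two admitted SCHED_DEADLINE tasks with
runtime/period = 30ms/50ms each (numbers are only illustrative),
the check added below works out as:

  admitted bandwidth (total_bw) : 2 * 30/50                = 1.20
  keep 2 CPUs                   : 0.95 * 2 = 1.90 >= 1.20  -> allowed
  shrink to 1 CPU               : 0.95 * 1 = 0.95 <  1.20  -> denied (-EBUSY)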

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Li Zefan <lizefan@huawei.com>
Cc: cgroups@vger.kernel.org
Link: http://lkml.kernel.org/r/5433E6AF.5080105@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/sched.h
kernel/cpuset.c
kernel/sched/core.c

index 1d1fa081d44fa2d3b5d50254a45f2251870d107a..320a9779f1b43cc065ea3e0a6d41f9ea690cebca 100644 (file)
@@ -2052,6 +2052,8 @@ static inline void tsk_restore_flags(struct task_struct *task,
        task->flags |= orig_flags & flags;
 }
 
+extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                                    const struct cpumask *trial);
 extern int task_can_attach(struct task_struct *p,
                           const struct cpumask *cs_cpus_allowed);
 #ifdef CONFIG_SMP
index 7af8577fc8f8167f624cc32ae9305e20a191e9f3..723cfc9d0ad7ac417fdeb6ea2824f00129bcfaa1 100644 (file)
@@ -506,6 +506,16 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
                        goto out;
        }
 
+       /*
+        * We can't shrink if we won't have enough room for SCHED_DEADLINE
+        * tasks.
+        */
+       ret = -EBUSY;
+       if (is_cpu_exclusive(cur) &&
+           !cpuset_cpumask_can_shrink(cur->cpus_allowed,
+                                      trial->cpus_allowed))
+               goto out;
+
        ret = 0;
 out:
        rcu_read_unlock();
index 9993feeb8b1088c930d0642eaf55a797d915b368..0456a55fc27fe72fb7ebbc5d3094514a3989492d 100644 (file)
@@ -4650,6 +4650,25 @@ void init_idle(struct task_struct *idle, int cpu)
 #endif
 }
 
+int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                             const struct cpumask *trial)
+{
+       int ret = 1, trial_cpus;
+       struct dl_bw *cur_dl_b;
+       unsigned long flags;
+
+       cur_dl_b = dl_bw_of(cpumask_any(cur));
+       trial_cpus = cpumask_weight(trial);
+
+       raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+       if (cur_dl_b->bw != -1 &&
+           cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+               ret = 0;
+       raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+
+       return ret;
+}
+
 int task_can_attach(struct task_struct *p,
                    const struct cpumask *cs_cpus_allowed)
 {
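
The comparison in cpuset_cpumask_can_shrink() can be exercised on its
own. The sketch below is a minimal userspace re-creation of that check;
BW_SHIFT, to_ratio() and the example runtimes/periods mirror the
kernel's fixed-point bandwidth representation but are assumptions of
this sketch, not part of the patch.

/* Illustrative userspace re-creation of the admission-control check. */
#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20	/* bandwidths are runtime/period scaled by 2^20 */

/* Fixed-point utilization of a runtime/period pair. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

/*
 * Same comparison as cpuset_cpumask_can_shrink(): the CPUs remaining
 * after the shrink must still cover the bandwidth already admitted.
 */
static int can_shrink(uint64_t per_cpu_bw, uint64_t total_bw, int trial_cpus)
{
	return !(per_cpu_bw * trial_cpus < total_bw);
}

int main(void)
{
	/* Two DL tasks of 30ms/50ms each: 0.6 + 0.6 = 1.2 admitted. */
	uint64_t task_bw = to_ratio(50000000ULL, 30000000ULL);
	uint64_t total_bw = 2 * task_bw;
	/* Default 95% per-CPU limit (950000us runtime / 1000000us period). */
	uint64_t per_cpu_bw = to_ratio(1000000ULL, 950000ULL);

	printf("shrink to 2 CPUs: %s\n",
	       can_shrink(per_cpu_bw, total_bw, 2) ? "allowed" : "denied");
	printf("shrink to 1 CPU:  %s\n",
	       can_shrink(per_cpu_bw, total_bw, 1) ? "allowed" : "denied");
	return 0;
}

Compiled with a plain C compiler it prints "allowed" for two CPUs and
"denied" for one, matching the worked example in the commit message.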