sched: Drop the rq argument to sched_class::select_task_rq()
diff --git a/kernel/sched.c b/kernel/sched.c
index d398f2f0a3c91a41e70f707cba746d97d0bb3594..d4b815d345b331117c951d9e4910ce3b835a759f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2195,13 +2195,15 @@ static int migration_cpu_stop(void *data);
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static bool migrate_task(struct task_struct *p, struct rq *rq)
+static bool need_migrate_task(struct task_struct *p)
 {
 	/*
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->on_rq || task_running(rq, p);
+	bool running = p->on_rq || p->on_cpu;
+	smp_rmb(); /* finish_lock_switch() */
+	return running;
 }
 
 /*
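
This first hunk is the core of the change: need_migrate_task() no longer needs the runqueue, because it reads p->on_cpu directly rather than calling task_running(rq, p). The smp_rmb() pairs with the store ordering in finish_lock_switch(), which clears ->on_cpu only after the switched-out task's state is settled. A minimal user-space model of that pairing (hypothetical C11 code, not from the kernel):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for the task_struct fields read above. */
struct task {
	atomic_bool on_rq;
	atomic_bool on_cpu;
};

static bool need_migrate_task_model(struct task *p)
{
	bool running = atomic_load_explicit(&p->on_rq, memory_order_relaxed) ||
		       atomic_load_explicit(&p->on_cpu, memory_order_relaxed);

	/* Plays the role of smp_rmb(): order the loads above before any
	 * later reads, pairing with the writer that clears on_cpu in
	 * finish_lock_switch(). */
	atomic_thread_fence(memory_order_acquire);

	return running;
}
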
@@ -2376,9 +2378,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
-	int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
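
Per the commit subject, the same parameter is dropped from the sched_class method itself; the struct sched_class and per-class hunks live in other files and are not shown on this page. Roughly, the function-pointer declaration changes as sketched below (an assumed shape, reconstructed from the call sites in this file):

struct sched_class {
	/* ... */
#ifdef CONFIG_SMP
	/* before: int (*select_task_rq)(struct rq *rq, struct task_struct *p,
	 *				 int sd_flag, int flags); */
	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
#endif
	/* ... */
};
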
@@ -2533,7 +2535,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 		en_flags |= ENQUEUE_WAKING;
 	}
 
-	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
 	__task_rq_unlock(rq);
@@ -2744,7 +2746,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
 	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 	set_task_cpu(p, cpu);
 
 	p->state = TASK_RUNNING;
@@ -3474,7 +3476,7 @@ void sched_exec(void)
 	int dest_cpu;
 
 	rq = task_rq_lock(p, &flags);
-	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
@@ -3482,7 +3484,7 @@ void sched_exec(void)
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
 	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
+	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 
 		task_rq_unlock(rq, &flags);
@@ -5911,7 +5913,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (migrate_task(p, rq)) {
+	if (need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		__task_rq_unlock(rq);
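
Both remaining call sites follow the same pattern: need_migrate_task(p) decides from the task alone whether migration help is needed, and the caller hands a stack-allocated migration_arg to migration_cpu_stop() (declared at the top of the first hunk) via the cpu-stopper. A sketch of that handoff, with the struct layout inferred from the { p, dest_cpu } initializers above and the body elided:

struct migration_arg {
	struct task_struct	*task;
	int			dest_cpu;
};

/* Executed by the cpu-stopper on the task's current CPU, e.g. via
 * stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;

	/* Body elided: moves arg->task to arg->dest_cpu. */
	return 0;
}
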