sched: sched_slice() fixlet
[deliverable/linux.git] / kernel / sched_fair.c
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e0c0b4bc3f08e822d976fe2d6c222300a61c6352..5cc1c162044fc4f40b15c351cd170ce2c58703f7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
                                                    struct sched_entity,
                                                    run_node);
 
-               if (vruntime == cfs_rq->min_vruntime)
+               if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
        for_each_sched_entity(se) {
-               struct load_weight *load = &cfs_rq->load;
+               struct load_weight *load;
+
+               cfs_rq = cfs_rq_of(se);
+               load = &cfs_rq->load;
 
                if (unlikely(!se->on_rq)) {
                        struct load_weight lw = cfs_rq->load;
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                        unsigned long thresh = sysctl_sched_latency;
 
                        /*
-                        * convert the sleeper threshold into virtual time
+                        * Convert the sleeper threshold into virtual time.
+                        * SCHED_IDLE is a special sub-class.  We care about
+                        * fairness only relative to other SCHED_IDLE tasks,
+                        * all of which have the same weight.
                         */
-                       if (sched_feat(NORMALIZED_SLEEPER))
+                       if (sched_feat(NORMALIZED_SLEEPER) &&
+                                       task_of(se)->policy != SCHED_IDLE)
                                thresh = calc_delta_fair(thresh, se);
 
                        vruntime -= thresh;
@@ -1340,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->last = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->last = se;
+       }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->next = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->next = se;
+       }
 }
 
 /*
@@ -1393,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
                return;
 
        /*
-        * Batch tasks do not preempt (their preemption is driven by
+        * Batch and idle tasks do not preempt (their preemption is driven by
         * the tick):
         */
-       if (unlikely(p->policy == SCHED_BATCH))
+       if (unlikely(p->policy != SCHED_NORMAL))
                return;
 
+       /* Idle tasks are by definition preempted by everybody. */
+       if (unlikely(curr->policy == SCHED_IDLE)) {
+               resched_task(curr);
+               return;
+       }
+
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
@@ -1617,8 +1634,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
        }
 }
 
-#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
-
 /*
  * Share the fairness runtime between parent and child, thus the
  * total amount of pressure for CPU stays equal - new tasks
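
The sched_slice() hunk above is the titular fixlet: inside for_each_sched_entity() the old code kept scaling against the load of the cfs_rq that was originally passed in, so with group scheduling every level of the hierarchy used the wrong runqueue weight. After the change, each iteration re-derives cfs_rq with cfs_rq_of(se) and scales by that level's own load. The stand-alone C sketch below illustrates the intended per-level scaling; the struct layout, the flat parent index, and the plain integer arithmetic are simplifying assumptions, not the kernel's data structures or its fixed-point calc_delta_mine() math.

    /*
     * Simplified, user-space sketch of the post-fix slice computation.
     * Everything here is illustrative; only the scaling rule mirrors the
     * kernel: at each hierarchy level the period is multiplied by
     * weight / queue_weight of the runqueue the entity is queued on.
     */
    #include <stdio.h>

    struct entity {
            unsigned long weight;       /* this entity's load weight            */
            unsigned long queue_weight; /* total weight of the runqueue it's on */
            int parent;                 /* index of the parent entity, -1 at top */
    };

    static unsigned long long slice_of(struct entity *e, int idx,
                                       unsigned long long period)
    {
            unsigned long long slice = period;

            /* Walk up the hierarchy, using each level's own queue weight. */
            for (; idx != -1; idx = e[idx].parent)
                    slice = slice * e[idx].weight / e[idx].queue_weight;

            return slice;
    }

    int main(void)
    {
            /* A task (weight 1024) in a group that owns half of its parent queue. */
            struct entity e[] = {
                    { .weight = 1024, .queue_weight = 2048, .parent = 1 },
                    { .weight = 1024, .queue_weight = 2048, .parent = -1 },
            };

            /* 20ms period, halved once per level -> prints 5000000 (5ms). */
            printf("%llu\n", slice_of(e, 0, 20000000ULL));
            return 0;
    }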
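
The remaining hunks wire SCHED_IDLE into wakeup preemption: set_last_buddy()/set_next_buddy() skip buddy marking for SCHED_IDLE tasks, wakers that are not SCHED_NORMAL (batch and idle) never preempt, and a running SCHED_IDLE task is rescheduled as soon as anything wakes up. A condensed, user-space restatement of that decision might look like the following; the enum and helper are illustrative only, not kernel API, and the real code goes on to the vruntime-based check where this sketch simply returns false.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative policy values mirroring the three classes the hunks treat. */
    enum policy { POLICY_NORMAL, POLICY_BATCH, POLICY_IDLE };

    /*
     * Should the waking task preempt the currently running one?
     *  - batch and idle wakers never preempt (they wait for the tick);
     *  - a running idle task is preempted by any normal waker;
     *  - otherwise the decision falls through to further checks.
     */
    static bool wakeup_preempts(enum policy curr, enum policy waking)
    {
            if (waking != POLICY_NORMAL)
                    return false;
            if (curr == POLICY_IDLE)
                    return true;
            return false;
    }

    int main(void)
    {
            printf("%d\n", wakeup_preempts(POLICY_IDLE, POLICY_NORMAL));  /* 1 */
            printf("%d\n", wakeup_preempts(POLICY_NORMAL, POLICY_BATCH)); /* 0 */
            return 0;
    }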