/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>

#include <trace/events/sched.h>
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
 *
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;
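
/*
 * Illustrative example (not from the original source): with LOG scaling on
 * an 8-CPU machine, get_update_sysctl_factor() below returns
 * 1 + ilog2(8) = 4, so the default 6ms latency is scaled to 24ms and the
 * 0.75ms minimum granularity to 3ms.
 */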
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;
/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution and calculation.
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
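
/*
 * Illustrative example (not from the original source): with a 20ms quota in
 * a 100ms period and the default 5ms slice, each cfs_rq pulls runtime from
 * the global pool in chunks of at most 5ms, so the group's local pools can
 * be topped up at most four times per period before throttling kicks in.
 */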
/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}
#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
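
/*
 * Illustrative example (not from the original source): SRR(x, y) divides by
 * 2^y with rounding to nearest instead of truncation, e.g. SRR(7, 2) =
 * (7 + 2) >> 2 = 2, whereas a plain shift 7 >> 2 would give 1.
 */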
/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
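
/*
 * Illustrative example (not from the original source): for a nice-0 entity
 * (weight 1024) on a runqueue load of 3072, calc_delta_mine(6000000, 1024,
 * &lw) returns roughly 6000000 * 1024 / 3072 = 2000000, computed with the
 * cached 32.32 fixed-point inverse lw->inv_weight instead of a 64-bit divide.
 */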
const struct sched_class fair_sched_class;
/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}
/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}
/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of common
	 * parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}
#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
				   unsigned long delta_exec);
/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
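
/*
 * Illustrative note (not from the original source): comparing vruntimes via
 * a signed difference keeps the ordering correct even after the u64 counters
 * wrap, e.g. with a->vruntime = 10 and b->vruntime = ULLONG_MAX - 10 the
 * difference is (s64)21 > 0, so 'a' is still correctly seen as the later
 * entity.
 */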
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We dont care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}
struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}
/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
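
/*
 * Illustrative example (not from the original source): with the defaults
 * above (6ms latency, 0.75ms minimum granularity, sched_nr_latency = 8),
 * a runqueue with 5 tasks keeps the 6ms period, while 12 tasks stretch it
 * to 12 * 0.75ms = 9ms so every task still gets at least the minimum
 * granularity.
 */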
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}

/*
 * We calculate the vruntime slice of a to be inserted task
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
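
/*
 * Illustrative example (not from the original source): with two runnable
 * nice-0 tasks (weight 1024 each) and a 6ms period, each gets a 3ms slice;
 * if one of them is reweighted to 2048 the slices become 4ms and 2ms, i.e.
 * proportional to weight / total runqueue weight.
 */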
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
static void update_cfs_shares(struct cfs_rq *cfs_rq);
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
	cfs_rq->load_unacc_exec_time += delta_exec;
#endif
}
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock_task;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}
static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_of(cfs_rq)->clock - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock_task;
}
/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/* we need this in update_cfs_load and load-balance functions below */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
# ifdef CONFIG_SMP
static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
					    int global_update)
{
	struct task_group *tg = cfs_rq->tg;
	long load_avg;

	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period + 1);
	load_avg -= cfs_rq->load_contribution;

	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
		atomic_add(load_avg, &tg->load_weight);
		cfs_rq->load_contribution += load_avg;
	}
}
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
	u64 period = sysctl_sched_shares_window;
	u64 now, delta;
	unsigned long load = cfs_rq->load.weight;

	if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
		return;

	now = rq_of(cfs_rq)->clock_task;
	delta = now - cfs_rq->load_stamp;

	/* truncate load history at 4 idle periods */
	if (cfs_rq->load_stamp > cfs_rq->load_last &&
	    now - cfs_rq->load_last > 4 * period) {
		cfs_rq->load_period = 0;
		cfs_rq->load_avg = 0;
		delta = period - 1;
	}

	cfs_rq->load_stamp = now;
	cfs_rq->load_unacc_exec_time = 0;
	cfs_rq->load_period += delta;
	if (load) {
		cfs_rq->load_last = now;
		cfs_rq->load_avg += delta * load;
	}

	/* consider updating load contribution on each fold or truncate */
	if (global_update || cfs_rq->load_period > period
	    || !cfs_rq->load_period)
		update_cfs_rq_load_contribution(cfs_rq, global_update);

	while (cfs_rq->load_period > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (cfs_rq->load_period));
		cfs_rq->load_period /= 2;
		cfs_rq->load_avg /= 2;
	}

	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
		list_del_leaf_cfs_rq(cfs_rq);
}
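
/*
 * Illustrative example (not from the original source): with the default
 * 10ms shares window, a cfs_rq that has accumulated a 20ms load_period is
 * folded twice by the loop above, halving load_period and load_avg each
 * time, so older load contributions decay geometrically instead of being
 * averaged over an ever-growing interval.
 */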
static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
	long tg_weight;

	/*
	 * Use this CPU's actual weight instead of the last load_contribution
	 * to gain a more accurate current total weight. See
	 * update_cfs_rq_load_contribution().
	 */
	tg_weight = atomic_read(&tg->load_weight);
	tg_weight -= cfs_rq->load_contribution;
	tg_weight += cfs_rq->load.weight;

	return tg_weight;
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long tg_weight, load, shares;

	tg_weight = calc_tg_weight(tg, cfs_rq);
	load = cfs_rq->load.weight;

	shares = (tg->shares * load);
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}
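
/*
 * Illustrative example (not from the original source): a group with
 * tg->shares = 1024 whose runqueue on this CPU carries 2048 of the group's
 * 8192 total weight gets shares = 1024 * 2048 / 8192 = 256 here, clamped
 * between MIN_SHARES and tg->shares.
 */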
static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
		update_cfs_load(cfs_rq, 0);
		update_cfs_shares(cfs_rq);
	}
}
# else /* CONFIG_SMP */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	return tg->shares;
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
# endif /* CONFIG_SMP */
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		account_entity_dequeue(cfs_rq, se);
	}

	update_load_set(&se->load, weight);

	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}
static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	struct task_group *tg;
	struct sched_entity *se;
	long shares;

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
	if (!se || throttled_hierarchy(cfs_rq))
		return;
#ifndef CONFIG_SMP
	if (likely(se->load.weight == tg->shares))
		return;
#endif
	shares = calc_cfs_shares(cfs_rq, tg);

	reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
989 static void enqueue_sleeper(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
991 #ifdef CONFIG_SCHEDSTATS
992 struct task_struct
*tsk
= NULL
;
994 if (entity_is_task(se
))
997 if (se
->statistics
.sleep_start
) {
998 u64 delta
= rq_of(cfs_rq
)->clock
- se
->statistics
.sleep_start
;
1003 if (unlikely(delta
> se
->statistics
.sleep_max
))
1004 se
->statistics
.sleep_max
= delta
;
1006 se
->statistics
.sleep_start
= 0;
1007 se
->statistics
.sum_sleep_runtime
+= delta
;
1010 account_scheduler_latency(tsk
, delta
>> 10, 1);
1011 trace_sched_stat_sleep(tsk
, delta
);
1014 if (se
->statistics
.block_start
) {
1015 u64 delta
= rq_of(cfs_rq
)->clock
- se
->statistics
.block_start
;
1020 if (unlikely(delta
> se
->statistics
.block_max
))
1021 se
->statistics
.block_max
= delta
;
1023 se
->statistics
.block_start
= 0;
1024 se
->statistics
.sum_sleep_runtime
+= delta
;
1027 if (tsk
->in_iowait
) {
1028 se
->statistics
.iowait_sum
+= delta
;
1029 se
->statistics
.iowait_count
++;
1030 trace_sched_stat_iowait(tsk
, delta
);
1034 * Blocking time is in units of nanosecs, so shift by
1035 * 20 to get a milliseconds-range estimation of the
1036 * amount of time that the task spent sleeping:
1038 if (unlikely(prof_on
== SLEEP_PROFILING
)) {
1039 profile_hits(SLEEP_PROFILING
,
1040 (void *)get_wchan(tsk
),
1043 account_scheduler_latency(tsk
, delta
>> 10, 0);
1049 static void check_spread(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1051 #ifdef CONFIG_SCHED_DEBUG
1052 s64 d
= se
->vruntime
- cfs_rq
->min_vruntime
;
1057 if (d
> 3*sysctl_sched_latency
)
1058 schedstat_inc(cfs_rq
, nr_spread_over
);
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}
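
/*
 * Illustrative example (not from the original source): with a 6ms latency
 * and GENTLE_FAIR_SLEEPERS enabled, a waking sleeper is placed at
 * min_vruntime - 3ms (half a latency of credit), while a freshly forked
 * task with START_DEBIT starts one vslice after min_vruntime, so sleepers
 * get a small preemption boost and new tasks cannot jump the queue.
 */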
1096 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
);
1099 enqueue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
1102 * Update the normalized vruntime before updating min_vruntime
1103 * through callig update_curr().
1105 if (!(flags
& ENQUEUE_WAKEUP
) || (flags
& ENQUEUE_WAKING
))
1106 se
->vruntime
+= cfs_rq
->min_vruntime
;
1109 * Update run-time statistics of the 'current'.
1111 update_curr(cfs_rq
);
1112 update_cfs_load(cfs_rq
, 0);
1113 account_entity_enqueue(cfs_rq
, se
);
1114 update_cfs_shares(cfs_rq
);
1116 if (flags
& ENQUEUE_WAKEUP
) {
1117 place_entity(cfs_rq
, se
, 0);
1118 enqueue_sleeper(cfs_rq
, se
);
1121 update_stats_enqueue(cfs_rq
, se
);
1122 check_spread(cfs_rq
, se
);
1123 if (se
!= cfs_rq
->curr
)
1124 __enqueue_entity(cfs_rq
, se
);
1127 if (cfs_rq
->nr_running
== 1) {
1128 list_add_leaf_cfs_rq(cfs_rq
);
1129 check_enqueue_throttle(cfs_rq
);
1133 static void __clear_buddies_last(struct sched_entity
*se
)
1135 for_each_sched_entity(se
) {
1136 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1137 if (cfs_rq
->last
== se
)
1138 cfs_rq
->last
= NULL
;
1144 static void __clear_buddies_next(struct sched_entity
*se
)
1146 for_each_sched_entity(se
) {
1147 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1148 if (cfs_rq
->next
== se
)
1149 cfs_rq
->next
= NULL
;
1155 static void __clear_buddies_skip(struct sched_entity
*se
)
1157 for_each_sched_entity(se
) {
1158 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1159 if (cfs_rq
->skip
== se
)
1160 cfs_rq
->skip
= NULL
;
1166 static void clear_buddies(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1168 if (cfs_rq
->last
== se
)
1169 __clear_buddies_last(se
);
1171 if (cfs_rq
->next
== se
)
1172 __clear_buddies_next(se
);
1174 if (cfs_rq
->skip
== se
)
1175 __clear_buddies_skip(se
);
1178 static void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
);
1181 dequeue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
1184 * Update run-time statistics of the 'current'.
1186 update_curr(cfs_rq
);
1188 update_stats_dequeue(cfs_rq
, se
);
1189 if (flags
& DEQUEUE_SLEEP
) {
1190 #ifdef CONFIG_SCHEDSTATS
1191 if (entity_is_task(se
)) {
1192 struct task_struct
*tsk
= task_of(se
);
1194 if (tsk
->state
& TASK_INTERRUPTIBLE
)
1195 se
->statistics
.sleep_start
= rq_of(cfs_rq
)->clock
;
1196 if (tsk
->state
& TASK_UNINTERRUPTIBLE
)
1197 se
->statistics
.block_start
= rq_of(cfs_rq
)->clock
;
1202 clear_buddies(cfs_rq
, se
);
1204 if (se
!= cfs_rq
->curr
)
1205 __dequeue_entity(cfs_rq
, se
);
1207 update_cfs_load(cfs_rq
, 0);
1208 account_entity_dequeue(cfs_rq
, se
);
1211 * Normalize the entity after updating the min_vruntime because the
1212 * update can refer to the ->curr item and we need to reflect this
1213 * movement in our normalized position.
1215 if (!(flags
& DEQUEUE_SLEEP
))
1216 se
->vruntime
-= cfs_rq
->min_vruntime
;
1218 /* return excess runtime on last dequeue */
1219 return_cfs_rq_runtime(cfs_rq
);
1221 update_min_vruntime(cfs_rq
);
1222 update_cfs_shares(cfs_rq
);
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;
	struct sched_entity *se;
	s64 delta;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (delta_exec < sysctl_sched_min_granularity)
		return;

	se = __pick_first_entity(cfs_rq);
	delta = curr->vruntime - se->vruntime;

	if (delta < 0)
		return;

	if (delta > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
1266 set_next_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1268 /* 'current' is not kept within the tree. */
1271 * Any task has to be enqueued before it get to execute on
1272 * a CPU. So account for the time it spent waiting on the
1275 update_stats_wait_end(cfs_rq
, se
);
1276 __dequeue_entity(cfs_rq
, se
);
1279 update_stats_curr_start(cfs_rq
, se
);
1281 #ifdef CONFIG_SCHEDSTATS
1283 * Track our maximum slice length, if the CPU's load is at
1284 * least twice that of our own weight (i.e. dont track it
1285 * when there are only lesser-weight tasks around):
1287 if (rq_of(cfs_rq
)->load
.weight
>= 2*se
->load
.weight
) {
1288 se
->statistics
.slice_max
= max(se
->statistics
.slice_max
,
1289 se
->sum_exec_runtime
- se
->prev_sum_exec_runtime
);
1292 se
->prev_sum_exec_runtime
= se
->sum_exec_runtime
;
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

/*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
 * 2) pick the "next" process, since someone really wants that to run
 * 3) pick the "last" process, for cache locality
 * 4) do not run the "skip" process, if something else is available
 */
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_first_entity(cfs_rq);
	struct sched_entity *left = se;

	/*
	 * Avoid running the skip buddy, if running something else can
	 * be done without getting too unfair.
	 */
	if (cfs_rq->skip == se) {
		struct sched_entity *second = __pick_next_entity(se);
		if (second && wakeup_preempt_entity(second, left) < 1)
			se = second;
	}

	/*
	 * Prefer last buddy, try to return the CPU to a preempted task.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	/*
	 * Someone really wants this to run. If it's not unfair, run it.
	 */
	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	clear_buddies(cfs_rq, se);

	return se;
}
1337 static void check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
);
1339 static void put_prev_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*prev
)
1342 * If still on the runqueue then deactivate_task()
1343 * was not called and update_curr() has to be done:
1346 update_curr(cfs_rq
);
1348 /* throttle cfs_rqs exceeding runtime */
1349 check_cfs_rq_runtime(cfs_rq
);
1351 check_spread(cfs_rq
, prev
);
1353 update_stats_wait_start(cfs_rq
, prev
);
1354 /* Put 'current' back into the tree. */
1355 __enqueue_entity(cfs_rq
, prev
);
1357 cfs_rq
->curr
= NULL
;
1361 entity_tick(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
, int queued
)
1364 * Update run-time statistics of the 'current'.
1366 update_curr(cfs_rq
);
1369 * Update share accounting for long-running entities.
1371 update_entity_shares_tick(cfs_rq
);
1373 #ifdef CONFIG_SCHED_HRTICK
1375 * queued ticks are scheduled to match the slice, so don't bother
1376 * validating it and just reschedule.
1379 resched_task(rq_of(cfs_rq
)->curr
);
1383 * don't let the period tick interfere with the hrtick preemption
1385 if (!sched_feat(DOUBLE_TICK
) &&
1386 hrtimer_active(&rq_of(cfs_rq
)->hrtick_timer
))
1390 if (cfs_rq
->nr_running
> 1)
1391 check_preempt_tick(cfs_rq
, curr
);
/**************************************************
 * CFS bandwidth control machinery
 */

#ifdef CONFIG_CFS_BANDWIDTH
#ifdef HAVE_JUMP_LABEL
static struct jump_label_key __cfs_bandwidth_used;

static inline bool cfs_bandwidth_used(void)
{
	return static_branch(&__cfs_bandwidth_used);
}

void account_cfs_bandwidth_used(int enabled, int was_enabled)
{
	/* only need to count groups transitioning between enabled/!enabled */
	if (enabled && !was_enabled)
		jump_label_inc(&__cfs_bandwidth_used);
	else if (!enabled && was_enabled)
		jump_label_dec(&__cfs_bandwidth_used);
}
#else /* HAVE_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
{
	return true;
}

void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
#endif /* HAVE_JUMP_LABEL */
/*
 * default period for cfs group bandwidth.
 * default: 0.1s, units: nanoseconds
 */
static inline u64 default_cfs_period(void)
{
	return 100000000ULL;
}
static inline u64 sched_cfs_bandwidth_slice(void)
{
	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}
1441 * Replenish runtime according to assigned quota and update expiration time.
1442 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
1443 * additional synchronization around rq->lock.
1445 * requires cfs_b->lock
1447 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth
*cfs_b
)
1451 if (cfs_b
->quota
== RUNTIME_INF
)
1454 now
= sched_clock_cpu(smp_processor_id());
1455 cfs_b
->runtime
= cfs_b
->quota
;
1456 cfs_b
->runtime_expires
= now
+ ktime_to_ns(cfs_b
->period
);
1459 static inline struct cfs_bandwidth
*tg_cfs_bandwidth(struct task_group
*tg
)
1461 return &tg
->cfs_bandwidth
;
1464 /* returns 0 on failure to allocate runtime */
1465 static int assign_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1467 struct task_group
*tg
= cfs_rq
->tg
;
1468 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(tg
);
1469 u64 amount
= 0, min_amount
, expires
;
1471 /* note: this is a positive sum as runtime_remaining <= 0 */
1472 min_amount
= sched_cfs_bandwidth_slice() - cfs_rq
->runtime_remaining
;
1474 raw_spin_lock(&cfs_b
->lock
);
1475 if (cfs_b
->quota
== RUNTIME_INF
)
1476 amount
= min_amount
;
1479 * If the bandwidth pool has become inactive, then at least one
1480 * period must have elapsed since the last consumption.
1481 * Refresh the global state and ensure bandwidth timer becomes
1484 if (!cfs_b
->timer_active
) {
1485 __refill_cfs_bandwidth_runtime(cfs_b
);
1486 __start_cfs_bandwidth(cfs_b
);
1489 if (cfs_b
->runtime
> 0) {
1490 amount
= min(cfs_b
->runtime
, min_amount
);
1491 cfs_b
->runtime
-= amount
;
1495 expires
= cfs_b
->runtime_expires
;
1496 raw_spin_unlock(&cfs_b
->lock
);
1498 cfs_rq
->runtime_remaining
+= amount
;
1500 * we may have advanced our local expiration to account for allowed
1501 * spread between our sched_clock and the one on which runtime was
1504 if ((s64
)(expires
- cfs_rq
->runtime_expires
) > 0)
1505 cfs_rq
->runtime_expires
= expires
;
1507 return cfs_rq
->runtime_remaining
> 0;
1511 * Note: This depends on the synchronization provided by sched_clock and the
1512 * fact that rq->clock snapshots this value.
1514 static void expire_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1516 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1517 struct rq
*rq
= rq_of(cfs_rq
);
1519 /* if the deadline is ahead of our clock, nothing to do */
1520 if (likely((s64
)(rq
->clock
- cfs_rq
->runtime_expires
) < 0))
1523 if (cfs_rq
->runtime_remaining
< 0)
1527 * If the local deadline has passed we have to consider the
1528 * possibility that our sched_clock is 'fast' and the global deadline
1529 * has not truly expired.
 * Fortunately we can determine whether this is the case by checking
 * whether the global deadline has advanced.
1535 if ((s64
)(cfs_rq
->runtime_expires
- cfs_b
->runtime_expires
) >= 0) {
1536 /* extend local deadline, drift is bounded above by 2 ticks */
1537 cfs_rq
->runtime_expires
+= TICK_NSEC
;
1539 /* global deadline is ahead, expiration has passed */
1540 cfs_rq
->runtime_remaining
= 0;
1544 static void __account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
,
1545 unsigned long delta_exec
)
1547 /* dock delta_exec before expiring quota (as it could span periods) */
1548 cfs_rq
->runtime_remaining
-= delta_exec
;
1549 expire_cfs_rq_runtime(cfs_rq
);
1551 if (likely(cfs_rq
->runtime_remaining
> 0))
1555 * if we're unable to extend our runtime we resched so that the active
1556 * hierarchy can be throttled
1558 if (!assign_cfs_rq_runtime(cfs_rq
) && likely(cfs_rq
->curr
))
1559 resched_task(rq_of(cfs_rq
)->curr
);
1562 static __always_inline
void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
,
1563 unsigned long delta_exec
)
1565 if (!cfs_bandwidth_used() || !cfs_rq
->runtime_enabled
)
1568 __account_cfs_rq_runtime(cfs_rq
, delta_exec
);
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return cfs_bandwidth_used() && cfs_rq->throttled;
}

/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	return cfs_bandwidth_used() && cfs_rq->throttle_count;
}
/*
 * Ensure that neither of the group entities corresponding to src_cpu or
 * dest_cpu are members of a throttled hierarchy when performing group
 * load-balance operations.
 */
static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;

	src_cfs_rq = tg->cfs_rq[src_cpu];
	dest_cfs_rq = tg->cfs_rq[dest_cpu];

	return throttled_hierarchy(src_cfs_rq) ||
	       throttled_hierarchy(dest_cfs_rq);
}
1599 /* updated child weight may affect parent so we have to do this bottom up */
1600 static int tg_unthrottle_up(struct task_group
*tg
, void *data
)
1602 struct rq
*rq
= data
;
1603 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
1605 cfs_rq
->throttle_count
--;
1607 if (!cfs_rq
->throttle_count
) {
1608 u64 delta
= rq
->clock_task
- cfs_rq
->load_stamp
;
1610 /* leaving throttled state, advance shares averaging windows */
1611 cfs_rq
->load_stamp
+= delta
;
1612 cfs_rq
->load_last
+= delta
;
1614 /* update entity weight now that we are on_rq again */
1615 update_cfs_shares(cfs_rq
);
1622 static int tg_throttle_down(struct task_group
*tg
, void *data
)
1624 struct rq
*rq
= data
;
1625 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
1627 /* group is entering throttled state, record last load */
1628 if (!cfs_rq
->throttle_count
)
1629 update_cfs_load(cfs_rq
, 0);
1630 cfs_rq
->throttle_count
++;
1635 static void throttle_cfs_rq(struct cfs_rq
*cfs_rq
)
1637 struct rq
*rq
= rq_of(cfs_rq
);
1638 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1639 struct sched_entity
*se
;
1640 long task_delta
, dequeue
= 1;
1642 se
= cfs_rq
->tg
->se
[cpu_of(rq_of(cfs_rq
))];
1644 /* account load preceding throttle */
1646 walk_tg_tree_from(cfs_rq
->tg
, tg_throttle_down
, tg_nop
, (void *)rq
);
1649 task_delta
= cfs_rq
->h_nr_running
;
1650 for_each_sched_entity(se
) {
1651 struct cfs_rq
*qcfs_rq
= cfs_rq_of(se
);
1652 /* throttled entity or throttle-on-deactivate */
1657 dequeue_entity(qcfs_rq
, se
, DEQUEUE_SLEEP
);
1658 qcfs_rq
->h_nr_running
-= task_delta
;
1660 if (qcfs_rq
->load
.weight
)
1665 rq
->nr_running
-= task_delta
;
1667 cfs_rq
->throttled
= 1;
1668 cfs_rq
->throttled_timestamp
= rq
->clock
;
1669 raw_spin_lock(&cfs_b
->lock
);
1670 list_add_tail_rcu(&cfs_rq
->throttled_list
, &cfs_b
->throttled_cfs_rq
);
1671 raw_spin_unlock(&cfs_b
->lock
);
1674 void unthrottle_cfs_rq(struct cfs_rq
*cfs_rq
)
1676 struct rq
*rq
= rq_of(cfs_rq
);
1677 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1678 struct sched_entity
*se
;
1682 se
= cfs_rq
->tg
->se
[cpu_of(rq_of(cfs_rq
))];
1684 cfs_rq
->throttled
= 0;
1685 raw_spin_lock(&cfs_b
->lock
);
1686 cfs_b
->throttled_time
+= rq
->clock
- cfs_rq
->throttled_timestamp
;
1687 list_del_rcu(&cfs_rq
->throttled_list
);
1688 raw_spin_unlock(&cfs_b
->lock
);
1689 cfs_rq
->throttled_timestamp
= 0;
1691 update_rq_clock(rq
);
1692 /* update hierarchical throttle state */
1693 walk_tg_tree_from(cfs_rq
->tg
, tg_nop
, tg_unthrottle_up
, (void *)rq
);
1695 if (!cfs_rq
->load
.weight
)
1698 task_delta
= cfs_rq
->h_nr_running
;
1699 for_each_sched_entity(se
) {
1703 cfs_rq
= cfs_rq_of(se
);
1705 enqueue_entity(cfs_rq
, se
, ENQUEUE_WAKEUP
);
1706 cfs_rq
->h_nr_running
+= task_delta
;
1708 if (cfs_rq_throttled(cfs_rq
))
1713 rq
->nr_running
+= task_delta
;
1715 /* determine whether we need to wake up potentially idle cpu */
1716 if (rq
->curr
== rq
->idle
&& rq
->cfs
.nr_running
)
1717 resched_task(rq
->curr
);
1720 static u64
distribute_cfs_runtime(struct cfs_bandwidth
*cfs_b
,
1721 u64 remaining
, u64 expires
)
1723 struct cfs_rq
*cfs_rq
;
1724 u64 runtime
= remaining
;
1727 list_for_each_entry_rcu(cfs_rq
, &cfs_b
->throttled_cfs_rq
,
1729 struct rq
*rq
= rq_of(cfs_rq
);
1731 raw_spin_lock(&rq
->lock
);
1732 if (!cfs_rq_throttled(cfs_rq
))
1735 runtime
= -cfs_rq
->runtime_remaining
+ 1;
1736 if (runtime
> remaining
)
1737 runtime
= remaining
;
1738 remaining
-= runtime
;
1740 cfs_rq
->runtime_remaining
+= runtime
;
1741 cfs_rq
->runtime_expires
= expires
;
1743 /* we check whether we're throttled above */
1744 if (cfs_rq
->runtime_remaining
> 0)
1745 unthrottle_cfs_rq(cfs_rq
);
1748 raw_spin_unlock(&rq
->lock
);
1759 * Responsible for refilling a task_group's bandwidth and unthrottling its
1760 * cfs_rqs as appropriate. If there has been no activity within the last
1761 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
1762 * used to track this state.
1764 static int do_sched_cfs_period_timer(struct cfs_bandwidth
*cfs_b
, int overrun
)
1766 u64 runtime
, runtime_expires
;
1767 int idle
= 1, throttled
;
1769 raw_spin_lock(&cfs_b
->lock
);
1770 /* no need to continue the timer with no bandwidth constraint */
1771 if (cfs_b
->quota
== RUNTIME_INF
)
1774 throttled
= !list_empty(&cfs_b
->throttled_cfs_rq
);
1775 /* idle depends on !throttled (for the case of a large deficit) */
1776 idle
= cfs_b
->idle
&& !throttled
;
1777 cfs_b
->nr_periods
+= overrun
;
1779 /* if we're going inactive then everything else can be deferred */
1783 __refill_cfs_bandwidth_runtime(cfs_b
);
1786 /* mark as potentially idle for the upcoming period */
1791 /* account preceding periods in which throttling occurred */
1792 cfs_b
->nr_throttled
+= overrun
;
1795 * There are throttled entities so we must first use the new bandwidth
1796 * to unthrottle them before making it generally available. This
1797 * ensures that all existing debts will be paid before a new cfs_rq is
1800 runtime
= cfs_b
->runtime
;
1801 runtime_expires
= cfs_b
->runtime_expires
;
1805 * This check is repeated as we are holding onto the new bandwidth
1806 * while we unthrottle. This can potentially race with an unthrottled
1807 * group trying to acquire new bandwidth from the global pool.
1809 while (throttled
&& runtime
> 0) {
1810 raw_spin_unlock(&cfs_b
->lock
);
1811 /* we can't nest cfs_b->lock while distributing bandwidth */
1812 runtime
= distribute_cfs_runtime(cfs_b
, runtime
,
1814 raw_spin_lock(&cfs_b
->lock
);
1816 throttled
= !list_empty(&cfs_b
->throttled_cfs_rq
);
1819 /* return (any) remaining runtime */
1820 cfs_b
->runtime
= runtime
;
1822 * While we are ensured activity in the period following an
1823 * unthrottle, this also covers the case in which the new bandwidth is
1824 * insufficient to cover the existing bandwidth deficit. (Forcing the
1825 * timer to remain active while there are any throttled entities.)
1830 cfs_b
->timer_active
= 0;
1831 raw_spin_unlock(&cfs_b
->lock
);
1836 /* a cfs_rq won't donate quota below this amount */
1837 static const u64 min_cfs_rq_runtime
= 1 * NSEC_PER_MSEC
;
1838 /* minimum remaining period time to redistribute slack quota */
1839 static const u64 min_bandwidth_expiration
= 2 * NSEC_PER_MSEC
;
1840 /* how long we wait to gather additional slack before distributing */
1841 static const u64 cfs_bandwidth_slack_period
= 5 * NSEC_PER_MSEC
;
1843 /* are we near the end of the current quota period? */
1844 static int runtime_refresh_within(struct cfs_bandwidth
*cfs_b
, u64 min_expire
)
1846 struct hrtimer
*refresh_timer
= &cfs_b
->period_timer
;
1849 /* if the call-back is running a quota refresh is already occurring */
1850 if (hrtimer_callback_running(refresh_timer
))
1853 /* is a quota refresh about to occur? */
1854 remaining
= ktime_to_ns(hrtimer_expires_remaining(refresh_timer
));
1855 if (remaining
< min_expire
)
1861 static void start_cfs_slack_bandwidth(struct cfs_bandwidth
*cfs_b
)
1863 u64 min_left
= cfs_bandwidth_slack_period
+ min_bandwidth_expiration
;
1865 /* if there's a quota refresh soon don't bother with slack */
1866 if (runtime_refresh_within(cfs_b
, min_left
))
1869 start_bandwidth_timer(&cfs_b
->slack_timer
,
1870 ns_to_ktime(cfs_bandwidth_slack_period
));
1873 /* we know any runtime found here is valid as update_curr() precedes return */
1874 static void __return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1876 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1877 s64 slack_runtime
= cfs_rq
->runtime_remaining
- min_cfs_rq_runtime
;
1879 if (slack_runtime
<= 0)
1882 raw_spin_lock(&cfs_b
->lock
);
1883 if (cfs_b
->quota
!= RUNTIME_INF
&&
1884 cfs_rq
->runtime_expires
== cfs_b
->runtime_expires
) {
1885 cfs_b
->runtime
+= slack_runtime
;
1887 /* we are under rq->lock, defer unthrottling using a timer */
1888 if (cfs_b
->runtime
> sched_cfs_bandwidth_slice() &&
1889 !list_empty(&cfs_b
->throttled_cfs_rq
))
1890 start_cfs_slack_bandwidth(cfs_b
);
1892 raw_spin_unlock(&cfs_b
->lock
);
1894 /* even if it's not valid for return we don't want to try again */
1895 cfs_rq
->runtime_remaining
-= slack_runtime
;
1898 static __always_inline
void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1900 if (!cfs_bandwidth_used())
1903 if (!cfs_rq
->runtime_enabled
|| cfs_rq
->nr_running
)
1906 __return_cfs_rq_runtime(cfs_rq
);
1910 * This is done with a timer (instead of inline with bandwidth return) since
1911 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
1913 static void do_sched_cfs_slack_timer(struct cfs_bandwidth
*cfs_b
)
1915 u64 runtime
= 0, slice
= sched_cfs_bandwidth_slice();
1918 /* confirm we're still not at a refresh boundary */
1919 if (runtime_refresh_within(cfs_b
, min_bandwidth_expiration
))
1922 raw_spin_lock(&cfs_b
->lock
);
1923 if (cfs_b
->quota
!= RUNTIME_INF
&& cfs_b
->runtime
> slice
) {
1924 runtime
= cfs_b
->runtime
;
1927 expires
= cfs_b
->runtime_expires
;
1928 raw_spin_unlock(&cfs_b
->lock
);
1933 runtime
= distribute_cfs_runtime(cfs_b
, runtime
, expires
);
1935 raw_spin_lock(&cfs_b
->lock
);
1936 if (expires
== cfs_b
->runtime_expires
)
1937 cfs_b
->runtime
= runtime
;
1938 raw_spin_unlock(&cfs_b
->lock
);
/*
 * When a group wakes up we want to make sure that its quota is not already
 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime as update_curr() throttling cannot trigger until it's on-rq.
 */
1946 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
)
1948 if (!cfs_bandwidth_used())
1951 /* an active group must be handled by the update_curr()->put() path */
1952 if (!cfs_rq
->runtime_enabled
|| cfs_rq
->curr
)
1955 /* ensure the group is not already throttled */
1956 if (cfs_rq_throttled(cfs_rq
))
1959 /* update runtime allocation */
1960 account_cfs_rq_runtime(cfs_rq
, 0);
1961 if (cfs_rq
->runtime_remaining
<= 0)
1962 throttle_cfs_rq(cfs_rq
);
1965 /* conditionally throttle active cfs_rq's from put_prev_entity() */
1966 static void check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1968 if (!cfs_bandwidth_used())
1971 if (likely(!cfs_rq
->runtime_enabled
|| cfs_rq
->runtime_remaining
> 0))
1975 * it's possible for a throttled entity to be forced into a running
1976 * state (e.g. set_curr_task), in this case we're finished.
1978 if (cfs_rq_throttled(cfs_rq
))
1981 throttle_cfs_rq(cfs_rq
);
1984 static inline u64
default_cfs_period(void);
1985 static int do_sched_cfs_period_timer(struct cfs_bandwidth
*cfs_b
, int overrun
);
1986 static void do_sched_cfs_slack_timer(struct cfs_bandwidth
*cfs_b
);
1988 static enum hrtimer_restart
sched_cfs_slack_timer(struct hrtimer
*timer
)
1990 struct cfs_bandwidth
*cfs_b
=
1991 container_of(timer
, struct cfs_bandwidth
, slack_timer
);
1992 do_sched_cfs_slack_timer(cfs_b
);
1994 return HRTIMER_NORESTART
;
1997 static enum hrtimer_restart
sched_cfs_period_timer(struct hrtimer
*timer
)
1999 struct cfs_bandwidth
*cfs_b
=
2000 container_of(timer
, struct cfs_bandwidth
, period_timer
);
2006 now
= hrtimer_cb_get_time(timer
);
2007 overrun
= hrtimer_forward(timer
, now
, cfs_b
->period
);
2012 idle
= do_sched_cfs_period_timer(cfs_b
, overrun
);
2015 return idle
? HRTIMER_NORESTART
: HRTIMER_RESTART
;
2018 void init_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
2020 raw_spin_lock_init(&cfs_b
->lock
);
2022 cfs_b
->quota
= RUNTIME_INF
;
2023 cfs_b
->period
= ns_to_ktime(default_cfs_period());
2025 INIT_LIST_HEAD(&cfs_b
->throttled_cfs_rq
);
2026 hrtimer_init(&cfs_b
->period_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
2027 cfs_b
->period_timer
.function
= sched_cfs_period_timer
;
2028 hrtimer_init(&cfs_b
->slack_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
2029 cfs_b
->slack_timer
.function
= sched_cfs_slack_timer
;
2032 static void init_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
2034 cfs_rq
->runtime_enabled
= 0;
2035 INIT_LIST_HEAD(&cfs_rq
->throttled_list
);
2038 /* requires cfs_b->lock, may release to reprogram timer */
2039 void __start_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
2042 * The timer may be active because we're trying to set a new bandwidth
2043 * period or because we're racing with the tear-down path
2044 * (timer_active==0 becomes visible before the hrtimer call-back
2045 * terminates). In either case we ensure that it's re-programmed
2047 while (unlikely(hrtimer_active(&cfs_b
->period_timer
))) {
2048 raw_spin_unlock(&cfs_b
->lock
);
2049 /* ensure cfs_b->lock is available while we wait */
2050 hrtimer_cancel(&cfs_b
->period_timer
);
2052 raw_spin_lock(&cfs_b
->lock
);
2053 /* if someone else restarted the timer then we're done */
2054 if (cfs_b
->timer_active
)
2058 cfs_b
->timer_active
= 1;
2059 start_bandwidth_timer(&cfs_b
->period_timer
, cfs_b
->period
);
2062 static void destroy_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
2064 hrtimer_cancel(&cfs_b
->period_timer
);
2065 hrtimer_cancel(&cfs_b
->slack_timer
);
2068 void unthrottle_offline_cfs_rqs(struct rq
*rq
)
2070 struct cfs_rq
*cfs_rq
;
2072 for_each_leaf_cfs_rq(rq
, cfs_rq
) {
2073 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
2075 if (!cfs_rq
->runtime_enabled
)
2079 * clock_task is not advancing so we just need to make sure
2080 * there's some valid quota amount
2082 cfs_rq
->runtime_remaining
= cfs_b
->quota
;
2083 if (cfs_rq_throttled(cfs_rq
))
2084 unthrottle_cfs_rq(cfs_rq
);
2088 #else /* CONFIG_CFS_BANDWIDTH */
2089 static void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
,
2090 unsigned long delta_exec
) {}
2091 static void check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) {}
2092 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
) {}
2093 static void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) {}
2095 static inline int cfs_rq_throttled(struct cfs_rq
*cfs_rq
)
2100 static inline int throttled_hierarchy(struct cfs_rq
*cfs_rq
)
2105 static inline int throttled_lb_pair(struct task_group
*tg
,
2106 int src_cpu
, int dest_cpu
)
2111 void init_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
) {}
2113 #ifdef CONFIG_FAIR_GROUP_SCHED
2114 static void init_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) {}
2117 static inline struct cfs_bandwidth
*tg_cfs_bandwidth(struct task_group
*tg
)
2121 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
) {}
2122 void unthrottle_offline_cfs_rqs(struct rq
*rq
) {}
2124 #endif /* CONFIG_CFS_BANDWIDTH */
2126 /**************************************************
2127 * CFS operations on tasks:
2130 #ifdef CONFIG_SCHED_HRTICK
2131 static void hrtick_start_fair(struct rq
*rq
, struct task_struct
*p
)
2133 struct sched_entity
*se
= &p
->se
;
2134 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
2136 WARN_ON(task_rq(p
) != rq
);
2138 if (hrtick_enabled(rq
) && cfs_rq
->nr_running
> 1) {
2139 u64 slice
= sched_slice(cfs_rq
, se
);
2140 u64 ran
= se
->sum_exec_runtime
- se
->prev_sum_exec_runtime
;
2141 s64 delta
= slice
- ran
;
2150 * Don't schedule slices shorter than 10000ns, that just
2151 * doesn't make sense. Rely on vruntime for fairness.
2154 delta
= max_t(s64
, 10000LL, delta
);
2156 hrtick_start(rq
, delta
);
2161 * called from enqueue/dequeue and updates the hrtick when the
2162 * current task is from our class and nr_running is low enough
2165 static void hrtick_update(struct rq
*rq
)
2167 struct task_struct
*curr
= rq
->curr
;
2169 if (curr
->sched_class
!= &fair_sched_class
)
2172 if (cfs_rq_of(&curr
->se
)->nr_running
< sched_nr_latency
)
2173 hrtick_start_fair(rq
, curr
);
2175 #else /* !CONFIG_SCHED_HRTICK */
2177 hrtick_start_fair(struct rq
*rq
, struct task_struct
*p
)
2181 static inline void hrtick_update(struct rq
*rq
)
2187 * The enqueue_task method is called before nr_running is
2188 * increased. Here we update the fair scheduling stats and
2189 * then put the task into the rbtree:
2192 enqueue_task_fair(struct rq
*rq
, struct task_struct
*p
, int flags
)
2194 struct cfs_rq
*cfs_rq
;
2195 struct sched_entity
*se
= &p
->se
;
2197 for_each_sched_entity(se
) {
2200 cfs_rq
= cfs_rq_of(se
);
2201 enqueue_entity(cfs_rq
, se
, flags
);
2204 * end evaluation on encountering a throttled cfs_rq
2206 * note: in the case of encountering a throttled cfs_rq we will
2207 * post the final h_nr_running increment below.
2209 if (cfs_rq_throttled(cfs_rq
))
2211 cfs_rq
->h_nr_running
++;
2213 flags
= ENQUEUE_WAKEUP
;
2216 for_each_sched_entity(se
) {
2217 cfs_rq
= cfs_rq_of(se
);
2218 cfs_rq
->h_nr_running
++;
2220 if (cfs_rq_throttled(cfs_rq
))
2223 update_cfs_load(cfs_rq
, 0);
2224 update_cfs_shares(cfs_rq
);
2232 static void set_next_buddy(struct sched_entity
*se
);
2235 * The dequeue_task method is called before nr_running is
2236 * decreased. We remove the task from the rbtree and
2237 * update the fair scheduling stats:
2239 static void dequeue_task_fair(struct rq
*rq
, struct task_struct
*p
, int flags
)
2241 struct cfs_rq
*cfs_rq
;
2242 struct sched_entity
*se
= &p
->se
;
2243 int task_sleep
= flags
& DEQUEUE_SLEEP
;
2245 for_each_sched_entity(se
) {
2246 cfs_rq
= cfs_rq_of(se
);
2247 dequeue_entity(cfs_rq
, se
, flags
);
2250 * end evaluation on encountering a throttled cfs_rq
2252 * note: in the case of encountering a throttled cfs_rq we will
2253 * post the final h_nr_running decrement below.
2255 if (cfs_rq_throttled(cfs_rq
))
2257 cfs_rq
->h_nr_running
--;
2259 /* Don't dequeue parent if it has other entities besides us */
2260 if (cfs_rq
->load
.weight
) {
2262 * Bias pick_next to pick a task from this cfs_rq, as
2263 * p is sleeping when it is within its sched_slice.
2265 if (task_sleep
&& parent_entity(se
))
2266 set_next_buddy(parent_entity(se
));
2268 /* avoid re-evaluating load for this entity */
2269 se
= parent_entity(se
);
2272 flags
|= DEQUEUE_SLEEP
;
2275 for_each_sched_entity(se
) {
2276 cfs_rq
= cfs_rq_of(se
);
2277 cfs_rq
->h_nr_running
--;
2279 if (cfs_rq_throttled(cfs_rq
))
2282 update_cfs_load(cfs_rq
, 0);
2283 update_cfs_shares(cfs_rq
);
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
        return cpu_rq(cpu)->load.weight;
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long total = weighted_cpuload(cpu);

        if (type == 0 || !sched_feat(LB_BIAS))
                return total;

        return min(rq->cpu_load[type-1], total);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long total = weighted_cpuload(cpu);

        if (type == 0 || !sched_feat(LB_BIAS))
                return total;

        return max(rq->cpu_load[type-1], total);
}

static unsigned long power_of(int cpu)
{
        return cpu_rq(cpu)->cpu_power;
}

static unsigned long cpu_avg_load_per_task(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

        if (nr_running)
                return rq->load.weight / nr_running;

        return 0;
}
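/*
 * Standalone sketch (not kernel code) of the LB_BIAS idea above: with a
 * made-up decayed history value and instantaneous weight, source_load()
 * reports the smaller of the two and target_load() the larger, so migration
 * sources are under-estimated and targets over-estimated.
 */
#include <stdio.h>

int main(void)
{
        /* hypothetical values for one cpu */
        unsigned long cpu_load_hist = 2048;     /* decayed history, cpu_load[type-1] */
        unsigned long instantaneous = 1024;     /* current rq->load.weight */

        unsigned long src = cpu_load_hist < instantaneous ? cpu_load_hist : instantaneous;
        unsigned long tgt = cpu_load_hist > instantaneous ? cpu_load_hist : instantaneous;

        /* source looks light (1024), target looks heavy (2048) */
        printf("source_load ~ %lu, target_load ~ %lu\n", src, tgt);
        return 0;
}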
static void task_waking_fair(struct task_struct *p)
{
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        u64 min_vruntime;

#ifndef CONFIG_64BIT
        u64 min_vruntime_copy;

        do {
                min_vruntime_copy = cfs_rq->min_vruntime_copy;
                smp_rmb();
                min_vruntime = cfs_rq->min_vruntime;
        } while (min_vruntime != min_vruntime_copy);
#else
        min_vruntime = cfs_rq->min_vruntime;
#endif

        se->vruntime -= min_vruntime;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * Calculate the effective load difference if @wl is added (subtracted) to @tg
 * on this @cpu and results in a total addition (subtraction) of @wg to the
 * total group weight.
 *
 * Given a runqueue weight distribution (rw_i) we can compute a shares
 * distribution (s_i) using:
 *
 *   s_i = rw_i / \Sum rw_j                                            (1)
 *
 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
 * shares distribution (s_i):
 *
 *   rw_i = {   2,   4,   1,   0 }
 *   s_i  = { 2/7, 4/7, 1/7,   0 }
 *
 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
 * task used to run on and the CPU the waker is running on), we need to
 * compute the effect of waking a task on either CPU and, in case of a sync
 * wakeup, compute the effect of the current task going to sleep.
 *
 * So for a change of @wl to the local @cpu with an overall group weight change
 * of @wg we can compute the new shares distribution (s'_i) using:
 *
 *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)                           (2)
 *
 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
 * differences in waking a task to CPU 0. The additional task changes the
 * weight and shares distributions like:
 *
 *   rw'_i = {   3,   4,   1,   0 }
 *   s'_i  = { 3/8, 4/8, 1/8,   0 }
 *
 * We can then compute the difference in effective weight by using:
 *
 *   dw_i = S * (s'_i - s_i)                                           (3)
 *
 * Where 'S' is the group weight as seen by its parent.
 *
 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
 * 4/7) times the weight of the group.
 */
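/*
 * The fractions above are easy to verify with a small standalone program
 * (not kernel code): it simply evaluates (1)-(3) for the example, taking
 * the group weight S as 1 and using the made-up per-cpu weights {2, 4, 1, 0}.
 */
#include <stdio.h>

int main(void)
{
        double rw[4] = { 2, 4, 1, 0 };
        double sum  = rw[0] + rw[1] + rw[2] + rw[3];    /* \Sum rw_j = 7 */
        double wl = 1, wg = 1;                          /* one extra task of weight 1 */
        double S  = 1;                                  /* group weight as seen by its parent */

        double s0  = rw[0] / sum;                       /* 2/7, per (1) */
        double s0p = (rw[0] + wl) / (sum + wg);         /* 3/8, per (2) */
        double s1  = rw[1] / sum;                       /* 4/7 */
        double s1p = rw[1] / (sum + wg);                /* 4/8 */

        printf("dw_0 = %f (5/56 = %f)\n",  S * (s0p - s0), 5.0 / 56);   /* per (3) */
        printf("dw_1 = %f (-4/56 = %f)\n", S * (s1p - s1), -4.0 / 56);
        return 0;
}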
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
        struct sched_entity *se = tg->se[cpu];

        if (!tg->parent)        /* the trivial, non-cgroup case */
                return wl;

        for_each_sched_entity(se) {
                long w, W;

                /*
                 * W = @wg + \Sum rw_j
                 */
                W = wg + calc_tg_weight(tg, se->my_q);

                /*
                 * w = rw_i + @wl
                 */
                w = se->my_q->load.weight + wl;

                /*
                 * wl = S * s'_i; see (2)
                 */
                wl = (w * tg->shares) / W;

                /*
                 * Per the above, wl is the new se->load.weight value; since
                 * those are clipped to [MIN_SHARES, ...) do so now. See
                 * calc_cfs_shares().
                 */
                if (wl < MIN_SHARES)
                        wl = MIN_SHARES;

                /*
                 * wl = dw_i = S * (s'_i - s_i); see (3)
                 */
                wl -= se->load.weight;

                /*
                 * Recursively apply this logic to all parent groups to compute
                 * the final effective load change on the root group. Since
                 * only the @tg group gets extra weight, all parent groups can
                 * only redistribute existing shares. @wl is the shift in shares
                 * resulting from this level per the above.
                 */
                wg = 0;
        }

        return wl;
}

#else /* CONFIG_FAIR_GROUP_SCHED */

static inline unsigned long effective_load(struct task_group *tg, int cpu,
                unsigned long wl, unsigned long wg)
{
        return wl;
}

#endif
2485 static int wake_affine(struct sched_domain
*sd
, struct task_struct
*p
, int sync
)
2487 s64 this_load
, load
;
2488 int idx
, this_cpu
, prev_cpu
;
2489 unsigned long tl_per_task
;
2490 struct task_group
*tg
;
2491 unsigned long weight
;
2495 this_cpu
= smp_processor_id();
2496 prev_cpu
= task_cpu(p
);
2497 load
= source_load(prev_cpu
, idx
);
2498 this_load
= target_load(this_cpu
, idx
);
2501 * If sync wakeup then subtract the (maximum possible)
2502 * effect of the currently running task from the load
2503 * of the current CPU:
2506 tg
= task_group(current
);
2507 weight
= current
->se
.load
.weight
;
2509 this_load
+= effective_load(tg
, this_cpu
, -weight
, -weight
);
2510 load
+= effective_load(tg
, prev_cpu
, 0, -weight
);
2514 weight
= p
->se
.load
.weight
;
2517 * In low-load situations, where prev_cpu is idle and this_cpu is idle
2518 * due to the sync cause above having dropped this_load to 0, we'll
2519 * always have an imbalance, but there's really nothing you can do
2520 * about that, so that's good too.
2522 * Otherwise check if either cpus are near enough in load to allow this
2523 * task to be woken on this_cpu.
2525 if (this_load
> 0) {
2526 s64 this_eff_load
, prev_eff_load
;
2528 this_eff_load
= 100;
2529 this_eff_load
*= power_of(prev_cpu
);
2530 this_eff_load
*= this_load
+
2531 effective_load(tg
, this_cpu
, weight
, weight
);
2533 prev_eff_load
= 100 + (sd
->imbalance_pct
- 100) / 2;
2534 prev_eff_load
*= power_of(this_cpu
);
2535 prev_eff_load
*= load
+ effective_load(tg
, prev_cpu
, 0, weight
);
2537 balanced
= this_eff_load
<= prev_eff_load
;
2542 * If the currently running task will sleep within
2543 * a reasonable amount of time then attract this newly
2546 if (sync
&& balanced
)
2549 schedstat_inc(p
, se
.statistics
.nr_wakeups_affine_attempts
);
2550 tl_per_task
= cpu_avg_load_per_task(this_cpu
);
2553 (this_load
<= load
&&
2554 this_load
+ target_load(prev_cpu
, idx
) <= tl_per_task
)) {
2556 * This domain has SD_WAKE_AFFINE and
2557 * p is cache cold in this domain, and
2558 * there is no bad imbalance.
2560 schedstat_inc(sd
, ttwu_move_affine
);
2561 schedstat_inc(p
, se
.statistics
.nr_wakeups_affine
);
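/*
 * Standalone numeric sketch (not kernel code) of the wake_affine()
 * comparison above.  The loads, cpu powers and imbalance_pct are made-up
 * values, and the cgroup effective_load() terms are ignored; it only shows
 * how each side is scaled by the *other* cpu's power and how prev_cpu gets
 * half the imbalance_pct as a bonus before the two are compared.
 */
#include <stdio.h>

int main(void)
{
        long this_load = 800, prev_load = 1200;         /* made-up weighted loads */
        unsigned long this_power = 1024, prev_power = 1024;
        unsigned int imbalance_pct = 125;               /* typical sd->imbalance_pct */

        long this_eff_load = 100 * (long)prev_power * this_load;
        long prev_eff_load = (100 + ((long)imbalance_pct - 100) / 2) *
                             (long)this_power * prev_load;

        printf("affine wakeup %s\n",
               this_eff_load <= prev_eff_load ? "allowed" : "rejected");
        return 0;
}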
2569 * find_idlest_group finds and returns the least busy CPU group within the
2572 static struct sched_group
*
2573 find_idlest_group(struct sched_domain
*sd
, struct task_struct
*p
,
2574 int this_cpu
, int load_idx
)
2576 struct sched_group
*idlest
= NULL
, *group
= sd
->groups
;
2577 unsigned long min_load
= ULONG_MAX
, this_load
= 0;
2578 int imbalance
= 100 + (sd
->imbalance_pct
-100)/2;
2581 unsigned long load
, avg_load
;
2585 /* Skip over this group if it has no CPUs allowed */
2586 if (!cpumask_intersects(sched_group_cpus(group
),
2587 tsk_cpus_allowed(p
)))
2590 local_group
= cpumask_test_cpu(this_cpu
,
2591 sched_group_cpus(group
));
2593 /* Tally up the load of all CPUs in the group */
2596 for_each_cpu(i
, sched_group_cpus(group
)) {
2597 /* Bias balancing toward cpus of our domain */
2599 load
= source_load(i
, load_idx
);
2601 load
= target_load(i
, load_idx
);
2606 /* Adjust by relative CPU power of the group */
2607 avg_load
= (avg_load
* SCHED_POWER_SCALE
) / group
->sgp
->power
;
2610 this_load
= avg_load
;
2611 } else if (avg_load
< min_load
) {
2612 min_load
= avg_load
;
2615 } while (group
= group
->next
, group
!= sd
->groups
);
2617 if (!idlest
|| 100*this_load
< imbalance
*min_load
)
2623 * find_idlest_cpu - find the idlest cpu among the cpus in group.
2626 find_idlest_cpu(struct sched_group
*group
, struct task_struct
*p
, int this_cpu
)
2628 unsigned long load
, min_load
= ULONG_MAX
;
2632 /* Traverse only the allowed CPUs */
2633 for_each_cpu_and(i
, sched_group_cpus(group
), tsk_cpus_allowed(p
)) {
2634 load
= weighted_cpuload(i
);
2636 if (load
< min_load
|| (load
== min_load
&& i
== this_cpu
)) {
2646 * Try and locate an idle CPU in the sched_domain.
2648 static int select_idle_sibling(struct task_struct
*p
, int target
)
2650 int cpu
= smp_processor_id();
2651 int prev_cpu
= task_cpu(p
);
2652 struct sched_domain
*sd
;
2653 struct sched_group
*sg
;
2657 * If the task is going to be woken-up on this cpu and if it is
2658 * already idle, then it is the right target.
2660 if (target
== cpu
&& idle_cpu(cpu
))
2664 * If the task is going to be woken-up on the cpu where it previously
2665 * ran and if it is currently idle, then it is the right target.
2667 if (target
== prev_cpu
&& idle_cpu(prev_cpu
))
2671 * Otherwise, iterate the domains and find an eligible idle cpu.
2675 for_each_domain(target
, sd
) {
2676 if (!smt
&& (sd
->flags
& SD_SHARE_CPUPOWER
))
2679 if (!(sd
->flags
& SD_SHARE_PKG_RESOURCES
)) {
2689 if (!cpumask_intersects(sched_group_cpus(sg
),
2690 tsk_cpus_allowed(p
)))
2693 for_each_cpu(i
, sched_group_cpus(sg
)) {
2698 target
= cpumask_first_and(sched_group_cpus(sg
),
2699 tsk_cpus_allowed(p
));
2703 } while (sg
!= sd
->groups
);
2712 * sched_balance_self: balance the current task (running on cpu) in domains
2713 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
2716 * Balance, ie. select the least loaded group.
2718 * Returns the target CPU number, or the same CPU if no balancing is needed.
2720 * preempt must be disabled.
2723 select_task_rq_fair(struct task_struct
*p
, int sd_flag
, int wake_flags
)
2725 struct sched_domain
*tmp
, *affine_sd
= NULL
, *sd
= NULL
;
2726 int cpu
= smp_processor_id();
2727 int prev_cpu
= task_cpu(p
);
2729 int want_affine
= 0;
2731 int sync
= wake_flags
& WF_SYNC
;
2733 if (sd_flag
& SD_BALANCE_WAKE
) {
2734 if (cpumask_test_cpu(cpu
, tsk_cpus_allowed(p
)))
2740 for_each_domain(cpu
, tmp
) {
2741 if (!(tmp
->flags
& SD_LOAD_BALANCE
))
2745 * If power savings logic is enabled for a domain, see if we
2746 * are not overloaded, if so, don't balance wider.
2748 if (tmp
->flags
& (SD_POWERSAVINGS_BALANCE
|SD_PREFER_LOCAL
)) {
2749 unsigned long power
= 0;
2750 unsigned long nr_running
= 0;
2751 unsigned long capacity
;
2754 for_each_cpu(i
, sched_domain_span(tmp
)) {
2755 power
+= power_of(i
);
2756 nr_running
+= cpu_rq(i
)->cfs
.nr_running
;
2759 capacity
= DIV_ROUND_CLOSEST(power
, SCHED_POWER_SCALE
);
2761 if (tmp
->flags
& SD_POWERSAVINGS_BALANCE
)
2764 if (nr_running
< capacity
)
2769 * If both cpu and prev_cpu are part of this domain,
2770 * cpu is a valid SD_WAKE_AFFINE target.
2772 if (want_affine
&& (tmp
->flags
& SD_WAKE_AFFINE
) &&
2773 cpumask_test_cpu(prev_cpu
, sched_domain_span(tmp
))) {
2778 if (!want_sd
&& !want_affine
)
2781 if (!(tmp
->flags
& sd_flag
))
2789 if (cpu
== prev_cpu
|| wake_affine(affine_sd
, p
, sync
))
2792 new_cpu
= select_idle_sibling(p
, prev_cpu
);
2797 int load_idx
= sd
->forkexec_idx
;
2798 struct sched_group
*group
;
2801 if (!(sd
->flags
& sd_flag
)) {
2806 if (sd_flag
& SD_BALANCE_WAKE
)
2807 load_idx
= sd
->wake_idx
;
2809 group
= find_idlest_group(sd
, p
, cpu
, load_idx
);
2815 new_cpu
= find_idlest_cpu(group
, p
, cpu
);
2816 if (new_cpu
== -1 || new_cpu
== cpu
) {
2817 /* Now try balancing at a lower domain level of cpu */
2822 /* Now try balancing at a lower domain level of new_cpu */
2824 weight
= sd
->span_weight
;
2826 for_each_domain(cpu
, tmp
) {
2827 if (weight
<= tmp
->span_weight
)
2829 if (tmp
->flags
& sd_flag
)
2832 /* while loop will break here if sd == NULL */
2839 #endif /* CONFIG_SMP */
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
        unsigned long gran = sysctl_sched_wakeup_granularity;

        /*
         * Since it's curr running now, convert the gran from real-time
         * to virtual-time in its units.
         *
         * By using 'se' instead of 'curr' we penalize light tasks, so
         * they get preempted easier. That is, if 'se' < 'curr' then
         * the resulting gran will be larger, therefore penalizing the
         * lighter, if otoh 'se' > 'curr' then the resulting gran will
         * be smaller, again penalizing the lighter task.
         *
         * This is especially important for buddies when the leftmost
         * task is higher priority than the buddy.
         */
        return calc_delta_fair(gran, se);
}

/*
 * Should 'se' preempt 'curr'.
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
        s64 gran, vdiff = curr->vruntime - se->vruntime;

        if (vdiff <= 0)
                return -1;

        gran = wakeup_gran(curr, se);
        if (vdiff > gran)
                return 1;

        return 0;
}
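/*
 * Standalone sketch (not kernel code) of the decision made by
 * wakeup_gran()/wakeup_preempt_entity() above.  It assumes the usual
 * NICE_0_LOAD == 1024 scaling inside calc_delta_fair(); the sketch_*
 * helper names are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024UL

/* calc_delta_fair() reduced to its weight scaling */
static int64_t sketch_calc_delta_fair(uint64_t gran, unsigned long weight)
{
        return (int64_t)(gran * NICE_0_LOAD / weight);
}

/* mirrors the -1/0/1 result of wakeup_preempt_entity() */
static int sketch_wakeup_preempt(int64_t curr_vruntime, int64_t se_vruntime,
                                 unsigned long se_weight, uint64_t gran_ns)
{
        int64_t vdiff = curr_vruntime - se_vruntime;

        if (vdiff <= 0)                 /* se is not ahead in virtual time */
                return -1;
        if (vdiff > sketch_calc_delta_fair(gran_ns, se_weight))
                return 1;               /* far enough ahead: preempt */
        return 0;
}

int main(void)
{
        /* curr is 3 ms ahead in vruntime; the waking se has half NICE_0 weight */
        printf("preempt? %d\n",
               sketch_wakeup_preempt(3000000, 0, 512, 1000000));       /* prints 1 */
        return 0;
}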
static void set_last_buddy(struct sched_entity *se)
{
        if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
                return;

        for_each_sched_entity(se)
                cfs_rq_of(se)->last = se;
}

static void set_next_buddy(struct sched_entity *se)
{
        if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
                return;

        for_each_sched_entity(se)
                cfs_rq_of(se)->next = se;
}

static void set_skip_buddy(struct sched_entity *se)
{
        for_each_sched_entity(se)
                cfs_rq_of(se)->skip = se;
}
2916 * Preempt the current task with a newly woken task if needed:
2918 static void check_preempt_wakeup(struct rq
*rq
, struct task_struct
*p
, int wake_flags
)
2920 struct task_struct
*curr
= rq
->curr
;
2921 struct sched_entity
*se
= &curr
->se
, *pse
= &p
->se
;
2922 struct cfs_rq
*cfs_rq
= task_cfs_rq(curr
);
2923 int scale
= cfs_rq
->nr_running
>= sched_nr_latency
;
2924 int next_buddy_marked
= 0;
2926 if (unlikely(se
== pse
))
2930 * This is possible from callers such as pull_task(), in which we
2931 * unconditionally check_preempt_curr() after an enqueue (which may have
2932 * led to a throttle). This both saves work and prevents false
2933 * next-buddy nomination below.
2935 if (unlikely(throttled_hierarchy(cfs_rq_of(pse
))))
2938 if (sched_feat(NEXT_BUDDY
) && scale
&& !(wake_flags
& WF_FORK
)) {
2939 set_next_buddy(pse
);
2940 next_buddy_marked
= 1;
2944 * We can come here with TIF_NEED_RESCHED already set from new task
2947 * Note: this also catches the edge-case of curr being in a throttled
2948 * group (e.g. via set_curr_task), since update_curr() (in the
2949 * enqueue of curr) will have resulted in resched being set. This
2950 * prevents us from potentially nominating it as a false LAST_BUDDY
2953 if (test_tsk_need_resched(curr
))
2956 /* Idle tasks are by definition preempted by non-idle tasks. */
2957 if (unlikely(curr
->policy
== SCHED_IDLE
) &&
2958 likely(p
->policy
!= SCHED_IDLE
))
2962 * Batch and idle tasks do not preempt non-idle tasks (their preemption
2963 * is driven by the tick):
2965 if (unlikely(p
->policy
!= SCHED_NORMAL
))
2968 find_matching_se(&se
, &pse
);
2969 update_curr(cfs_rq_of(se
));
2971 if (wakeup_preempt_entity(se
, pse
) == 1) {
2973 * Bias pick_next to pick the sched entity that is
2974 * triggering this preemption.
2976 if (!next_buddy_marked
)
2977 set_next_buddy(pse
);
2986 * Only set the backward buddy when the current task is still
2987 * on the rq. This can happen when a wakeup gets interleaved
2988 * with schedule on the ->pre_schedule() or idle_balance()
2989 * point, either of which can drop the rq lock.
2991 * Also, during early boot the idle thread is in the fair class,
2992 * for obvious reasons its a bad idea to schedule back to it.
2994 if (unlikely(!se
->on_rq
|| curr
== rq
->idle
))
2997 if (sched_feat(LAST_BUDDY
) && scale
&& entity_is_task(se
))
3001 static struct task_struct
*pick_next_task_fair(struct rq
*rq
)
3003 struct task_struct
*p
;
3004 struct cfs_rq
*cfs_rq
= &rq
->cfs
;
3005 struct sched_entity
*se
;
3007 if (!cfs_rq
->nr_running
)
3011 se
= pick_next_entity(cfs_rq
);
3012 set_next_entity(cfs_rq
, se
);
3013 cfs_rq
= group_cfs_rq(se
);
3017 hrtick_start_fair(rq
, p
);
3023 * Account for a descheduled task:
3025 static void put_prev_task_fair(struct rq
*rq
, struct task_struct
*prev
)
3027 struct sched_entity
*se
= &prev
->se
;
3028 struct cfs_rq
*cfs_rq
;
3030 for_each_sched_entity(se
) {
3031 cfs_rq
= cfs_rq_of(se
);
3032 put_prev_entity(cfs_rq
, se
);
3037 * sched_yield() is very simple
3039 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3041 static void yield_task_fair(struct rq
*rq
)
3043 struct task_struct
*curr
= rq
->curr
;
3044 struct cfs_rq
*cfs_rq
= task_cfs_rq(curr
);
3045 struct sched_entity
*se
= &curr
->se
;
3048 * Are we the only task in the tree?
3050 if (unlikely(rq
->nr_running
== 1))
3053 clear_buddies(cfs_rq
, se
);
3055 if (curr
->policy
!= SCHED_BATCH
) {
3056 update_rq_clock(rq
);
3058 * Update run-time statistics of the 'current'.
3060 update_curr(cfs_rq
);
3066 static bool yield_to_task_fair(struct rq
*rq
, struct task_struct
*p
, bool preempt
)
3068 struct sched_entity
*se
= &p
->se
;
3070 /* throttled hierarchies are not runnable */
3071 if (!se
->on_rq
|| throttled_hierarchy(cfs_rq_of(se
)))
3074 /* Tell the scheduler that we'd really like pse to run next. */
3077 yield_task_fair(rq
);
3083 /**************************************************
3084 * Fair scheduling class load-balancing methods:
3088 * pull_task - move a task from a remote runqueue to the local runqueue.
3089 * Both runqueues must be locked.
3091 static void pull_task(struct rq
*src_rq
, struct task_struct
*p
,
3092 struct rq
*this_rq
, int this_cpu
)
3094 deactivate_task(src_rq
, p
, 0);
3095 set_task_cpu(p
, this_cpu
);
3096 activate_task(this_rq
, p
, 0);
3097 check_preempt_curr(this_rq
, p
, 0);
/*
 * Is this task likely cache-hot:
 */
static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
        s64 delta;

        if (p->sched_class != &fair_sched_class)
                return 0;

        if (unlikely(p->policy == SCHED_IDLE))
                return 0;

        /*
         * Buddy candidates are cache hot:
         */
        if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
                        (&p->se == cfs_rq_of(&p->se)->next ||
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;

        if (sysctl_sched_migration_cost == -1)
                return 1;
        if (sysctl_sched_migration_cost == 0)
                return 0;

        delta = now - p->se.exec_start;

        return delta < (s64)sysctl_sched_migration_cost;
}
3133 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3136 int can_migrate_task(struct task_struct
*p
, struct rq
*rq
, int this_cpu
,
3137 struct sched_domain
*sd
, enum cpu_idle_type idle
,
3140 int tsk_cache_hot
= 0;
3142 * We do not migrate tasks that are:
3143 * 1) running (obviously), or
3144 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3145 * 3) are cache-hot on their current CPU.
3147 if (!cpumask_test_cpu(this_cpu
, tsk_cpus_allowed(p
))) {
3148 schedstat_inc(p
, se
.statistics
.nr_failed_migrations_affine
);
3153 if (task_running(rq
, p
)) {
3154 schedstat_inc(p
, se
.statistics
.nr_failed_migrations_running
);
3159 * Aggressive migration if:
3160 * 1) task is cache cold, or
3161 * 2) too many balance attempts have failed.
3164 tsk_cache_hot
= task_hot(p
, rq
->clock_task
, sd
);
3165 if (!tsk_cache_hot
||
3166 sd
->nr_balance_failed
> sd
->cache_nice_tries
) {
3167 #ifdef CONFIG_SCHEDSTATS
3168 if (tsk_cache_hot
) {
3169 schedstat_inc(sd
, lb_hot_gained
[idle
]);
3170 schedstat_inc(p
, se
.statistics
.nr_forced_migrations
);
3176 if (tsk_cache_hot
) {
3177 schedstat_inc(p
, se
.statistics
.nr_failed_migrations_hot
);
3184 * move_one_task tries to move exactly one task from busiest to this_rq, as
3185 * part of active balancing operations within "domain".
3186 * Returns 1 if successful and 0 otherwise.
3188 * Called with both runqueues locked.
3191 move_one_task(struct rq
*this_rq
, int this_cpu
, struct rq
*busiest
,
3192 struct sched_domain
*sd
, enum cpu_idle_type idle
)
3194 struct task_struct
*p
, *n
;
3195 struct cfs_rq
*cfs_rq
;
3198 for_each_leaf_cfs_rq(busiest
, cfs_rq
) {
3199 list_for_each_entry_safe(p
, n
, &cfs_rq
->tasks
, se
.group_node
) {
3200 if (throttled_lb_pair(task_group(p
),
3201 busiest
->cpu
, this_cpu
))
3204 if (!can_migrate_task(p
, busiest
, this_cpu
,
3208 pull_task(busiest
, p
, this_rq
, this_cpu
);
3210 * Right now, this is only the second place pull_task()
3211 * is called, so we can safely collect pull_task()
3212 * stats here rather than inside pull_task().
3214 schedstat_inc(sd
, lb_gained
[idle
]);
3222 static unsigned long
3223 balance_tasks(struct rq
*this_rq
, int this_cpu
, struct rq
*busiest
,
3224 unsigned long max_load_move
, struct sched_domain
*sd
,
3225 enum cpu_idle_type idle
, int *all_pinned
,
3226 struct cfs_rq
*busiest_cfs_rq
)
3228 int loops
= 0, pulled
= 0;
3229 long rem_load_move
= max_load_move
;
3230 struct task_struct
*p
, *n
;
3232 if (max_load_move
== 0)
3235 list_for_each_entry_safe(p
, n
, &busiest_cfs_rq
->tasks
, se
.group_node
) {
3236 if (loops
++ > sysctl_sched_nr_migrate
)
3239 if ((p
->se
.load
.weight
>> 1) > rem_load_move
||
3240 !can_migrate_task(p
, busiest
, this_cpu
, sd
, idle
,
3244 pull_task(busiest
, p
, this_rq
, this_cpu
);
3246 rem_load_move
-= p
->se
.load
.weight
;
3248 #ifdef CONFIG_PREEMPT
3250 * NEWIDLE balancing is a source of latency, so preemptible
3251 * kernels will stop after the first task is pulled to minimize
3252 * the critical section.
3254 if (idle
== CPU_NEWLY_IDLE
)
3259 * We only want to steal up to the prescribed amount of
3262 if (rem_load_move
<= 0)
3267 * Right now, this is one of only two places pull_task() is called,
3268 * so we can safely collect pull_task() stats here rather than
3269 * inside pull_task().
3271 schedstat_add(sd
, lb_gained
[idle
], pulled
);
3273 return max_load_move
- rem_load_move
;
3276 #ifdef CONFIG_FAIR_GROUP_SCHED
3278 * update tg->load_weight by folding this cpu's load_avg
3280 static int update_shares_cpu(struct task_group
*tg
, int cpu
)
3282 struct cfs_rq
*cfs_rq
;
3283 unsigned long flags
;
3290 cfs_rq
= tg
->cfs_rq
[cpu
];
3292 raw_spin_lock_irqsave(&rq
->lock
, flags
);
3294 update_rq_clock(rq
);
3295 update_cfs_load(cfs_rq
, 1);
3298 * We need to update shares after updating tg->load_weight in
3299 * order to adjust the weight of groups with long running tasks.
3301 update_cfs_shares(cfs_rq
);
3303 raw_spin_unlock_irqrestore(&rq
->lock
, flags
);
3308 static void update_shares(int cpu
)
3310 struct cfs_rq
*cfs_rq
;
3311 struct rq
*rq
= cpu_rq(cpu
);
3315 * Iterates the task_group tree in a bottom up fashion, see
3316 * list_add_leaf_cfs_rq() for details.
3318 for_each_leaf_cfs_rq(rq
, cfs_rq
) {
3319 /* throttled entities do not contribute to load */
3320 if (throttled_hierarchy(cfs_rq
))
3323 update_shares_cpu(cfs_rq
->tg
, cpu
);
3329 * Compute the cpu's hierarchical load factor for each task group.
3330 * This needs to be done in a top-down fashion because the load of a child
3331 * group is a fraction of its parents load.
3333 static int tg_load_down(struct task_group
*tg
, void *data
)
3336 long cpu
= (long)data
;
3339 load
= cpu_rq(cpu
)->load
.weight
;
3341 load
= tg
->parent
->cfs_rq
[cpu
]->h_load
;
3342 load
*= tg
->se
[cpu
]->load
.weight
;
3343 load
/= tg
->parent
->cfs_rq
[cpu
]->load
.weight
+ 1;
3346 tg
->cfs_rq
[cpu
]->h_load
= load
;
3351 static void update_h_load(long cpu
)
3353 walk_tg_tree(tg_load_down
, tg_nop
, (void *)cpu
);
3356 static unsigned long
3357 load_balance_fair(struct rq
*this_rq
, int this_cpu
, struct rq
*busiest
,
3358 unsigned long max_load_move
,
3359 struct sched_domain
*sd
, enum cpu_idle_type idle
,
3362 long rem_load_move
= max_load_move
;
3363 struct cfs_rq
*busiest_cfs_rq
;
3366 update_h_load(cpu_of(busiest
));
3368 for_each_leaf_cfs_rq(busiest
, busiest_cfs_rq
) {
3369 unsigned long busiest_h_load
= busiest_cfs_rq
->h_load
;
3370 unsigned long busiest_weight
= busiest_cfs_rq
->load
.weight
;
3371 u64 rem_load
, moved_load
;
3374 * empty group or part of a throttled hierarchy
3376 if (!busiest_cfs_rq
->task_weight
||
3377 throttled_lb_pair(busiest_cfs_rq
->tg
, cpu_of(busiest
), this_cpu
))
3380 rem_load
= (u64
)rem_load_move
* busiest_weight
;
3381 rem_load
= div_u64(rem_load
, busiest_h_load
+ 1);
3383 moved_load
= balance_tasks(this_rq
, this_cpu
, busiest
,
3384 rem_load
, sd
, idle
, all_pinned
,
3390 moved_load
*= busiest_h_load
;
3391 moved_load
= div_u64(moved_load
, busiest_weight
+ 1);
3393 rem_load_move
-= moved_load
;
3394 if (rem_load_move
< 0)
3399 return max_load_move
- rem_load_move
;
3402 static inline void update_shares(int cpu
)
3406 static unsigned long
3407 load_balance_fair(struct rq
*this_rq
, int this_cpu
, struct rq
*busiest
,
3408 unsigned long max_load_move
,
3409 struct sched_domain
*sd
, enum cpu_idle_type idle
,
3412 return balance_tasks(this_rq
, this_cpu
, busiest
,
3413 max_load_move
, sd
, idle
, all_pinned
,
3419 * move_tasks tries to move up to max_load_move weighted load from busiest to
3420 * this_rq, as part of a balancing operation within domain "sd".
3421 * Returns 1 if successful and 0 otherwise.
3423 * Called with both runqueues locked.
3425 static int move_tasks(struct rq
*this_rq
, int this_cpu
, struct rq
*busiest
,
3426 unsigned long max_load_move
,
3427 struct sched_domain
*sd
, enum cpu_idle_type idle
,
3430 unsigned long total_load_moved
= 0, load_moved
;
3433 load_moved
= load_balance_fair(this_rq
, this_cpu
, busiest
,
3434 max_load_move
- total_load_moved
,
3435 sd
, idle
, all_pinned
);
3437 total_load_moved
+= load_moved
;
3439 #ifdef CONFIG_PREEMPT
3441 * NEWIDLE balancing is a source of latency, so preemptible
3442 * kernels will stop after the first task is pulled to minimize
3443 * the critical section.
3445 if (idle
== CPU_NEWLY_IDLE
&& this_rq
->nr_running
)
3448 if (raw_spin_is_contended(&this_rq
->lock
) ||
3449 raw_spin_is_contended(&busiest
->lock
))
3452 } while (load_moved
&& max_load_move
> total_load_moved
);
3454 return total_load_moved
> 0;
3457 /********** Helpers for find_busiest_group ************************/
3459 * sd_lb_stats - Structure to store the statistics of a sched_domain
3460 * during load balancing.
3462 struct sd_lb_stats
{
3463 struct sched_group
*busiest
; /* Busiest group in this sd */
3464 struct sched_group
*this; /* Local group in this sd */
3465 unsigned long total_load
; /* Total load of all groups in sd */
3466 unsigned long total_pwr
; /* Total power of all groups in sd */
3467 unsigned long avg_load
; /* Average load across all groups in sd */
3469 /** Statistics of this group */
3470 unsigned long this_load
;
3471 unsigned long this_load_per_task
;
3472 unsigned long this_nr_running
;
3473 unsigned long this_has_capacity
;
3474 unsigned int this_idle_cpus
;
3476 /* Statistics of the busiest group */
3477 unsigned int busiest_idle_cpus
;
3478 unsigned long max_load
;
3479 unsigned long busiest_load_per_task
;
3480 unsigned long busiest_nr_running
;
3481 unsigned long busiest_group_capacity
;
3482 unsigned long busiest_has_capacity
;
3483 unsigned int busiest_group_weight
;
3485 int group_imb
; /* Is there imbalance in this sd */
3486 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3487 int power_savings_balance
; /* Is powersave balance needed for this sd */
3488 struct sched_group
*group_min
; /* Least loaded group in sd */
3489 struct sched_group
*group_leader
; /* Group which relieves group_min */
3490 unsigned long min_load_per_task
; /* load_per_task in group_min */
3491 unsigned long leader_nr_running
; /* Nr running of group_leader */
3492 unsigned long min_nr_running
; /* Nr running of group_min */
3497 * sg_lb_stats - stats of a sched_group required for load_balancing
3499 struct sg_lb_stats
{
3500 unsigned long avg_load
; /*Avg load across the CPUs of the group */
3501 unsigned long group_load
; /* Total load over the CPUs of the group */
3502 unsigned long sum_nr_running
; /* Nr tasks running in the group */
3503 unsigned long sum_weighted_load
; /* Weighted load of group's tasks */
3504 unsigned long group_capacity
;
3505 unsigned long idle_cpus
;
3506 unsigned long group_weight
;
3507 int group_imb
; /* Is there an imbalance in the group ? */
3508 int group_has_capacity
; /* Is there extra capacity in the group? */
3512 * get_sd_load_idx - Obtain the load index for a given sched domain.
3513 * @sd: The sched_domain whose load_idx is to be obtained.
3514 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
3516 static inline int get_sd_load_idx(struct sched_domain
*sd
,
3517 enum cpu_idle_type idle
)
3523 load_idx
= sd
->busy_idx
;
3526 case CPU_NEWLY_IDLE
:
3527 load_idx
= sd
->newidle_idx
;
3530 load_idx
= sd
->idle_idx
;
3538 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3540 * init_sd_power_savings_stats - Initialize power savings statistics for
3541 * the given sched_domain, during load balancing.
3543 * @sd: Sched domain whose power-savings statistics are to be initialized.
3544 * @sds: Variable containing the statistics for sd.
3545 * @idle: Idle status of the CPU at which we're performing load-balancing.
3547 static inline void init_sd_power_savings_stats(struct sched_domain
*sd
,
3548 struct sd_lb_stats
*sds
, enum cpu_idle_type idle
)
3551 * Busy processors will not participate in power savings
3554 if (idle
== CPU_NOT_IDLE
|| !(sd
->flags
& SD_POWERSAVINGS_BALANCE
))
3555 sds
->power_savings_balance
= 0;
3557 sds
->power_savings_balance
= 1;
3558 sds
->min_nr_running
= ULONG_MAX
;
3559 sds
->leader_nr_running
= 0;
3564 * update_sd_power_savings_stats - Update the power saving stats for a
3565 * sched_domain while performing load balancing.
3567 * @group: sched_group belonging to the sched_domain under consideration.
3568 * @sds: Variable containing the statistics of the sched_domain
3569 * @local_group: Does group contain the CPU for which we're performing
3571 * @sgs: Variable containing the statistics of the group.
3573 static inline void update_sd_power_savings_stats(struct sched_group
*group
,
3574 struct sd_lb_stats
*sds
, int local_group
, struct sg_lb_stats
*sgs
)
3577 if (!sds
->power_savings_balance
)
3581 * If the local group is idle or completely loaded
3582 * no need to do power savings balance at this domain
3584 if (local_group
&& (sds
->this_nr_running
>= sgs
->group_capacity
||
3585 !sds
->this_nr_running
))
3586 sds
->power_savings_balance
= 0;
3589 * If a group is already running at full capacity or idle,
3590 * don't include that group in power savings calculations
3592 if (!sds
->power_savings_balance
||
3593 sgs
->sum_nr_running
>= sgs
->group_capacity
||
3594 !sgs
->sum_nr_running
)
3598 * Calculate the group which has the least non-idle load.
3599 * This is the group from where we need to pick up the load
3602 if ((sgs
->sum_nr_running
< sds
->min_nr_running
) ||
3603 (sgs
->sum_nr_running
== sds
->min_nr_running
&&
3604 group_first_cpu(group
) > group_first_cpu(sds
->group_min
))) {
3605 sds
->group_min
= group
;
3606 sds
->min_nr_running
= sgs
->sum_nr_running
;
3607 sds
->min_load_per_task
= sgs
->sum_weighted_load
/
3608 sgs
->sum_nr_running
;
3612 * Calculate the group which is almost near its
3613 * capacity but still has some space to pick up some load
3614 * from other group and save more power
3616 if (sgs
->sum_nr_running
+ 1 > sgs
->group_capacity
)
3619 if (sgs
->sum_nr_running
> sds
->leader_nr_running
||
3620 (sgs
->sum_nr_running
== sds
->leader_nr_running
&&
3621 group_first_cpu(group
) < group_first_cpu(sds
->group_leader
))) {
3622 sds
->group_leader
= group
;
3623 sds
->leader_nr_running
= sgs
->sum_nr_running
;
3628 * check_power_save_busiest_group - see if there is potential for some power-savings balance
3629 * @sds: Variable containing the statistics of the sched_domain
3630 * under consideration.
3631 * @this_cpu: Cpu at which we're currently performing load-balancing.
3632 * @imbalance: Variable to store the imbalance.
3635 * Check if we have potential to perform some power-savings balance.
3636 * If yes, set the busiest group to be the least loaded group in the
3637 * sched_domain, so that its CPUs can be put to idle.
3639 * Returns 1 if there is potential to perform power-savings balance.
3642 static inline int check_power_save_busiest_group(struct sd_lb_stats
*sds
,
3643 int this_cpu
, unsigned long *imbalance
)
3645 if (!sds
->power_savings_balance
)
3648 if (sds
->this != sds
->group_leader
||
3649 sds
->group_leader
== sds
->group_min
)
3652 *imbalance
= sds
->min_load_per_task
;
3653 sds
->busiest
= sds
->group_min
;
3658 #else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3659 static inline void init_sd_power_savings_stats(struct sched_domain
*sd
,
3660 struct sd_lb_stats
*sds
, enum cpu_idle_type idle
)
3665 static inline void update_sd_power_savings_stats(struct sched_group
*group
,
3666 struct sd_lb_stats
*sds
, int local_group
, struct sg_lb_stats
*sgs
)
3671 static inline int check_power_save_busiest_group(struct sd_lb_stats
*sds
,
3672 int this_cpu
, unsigned long *imbalance
)
3676 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3679 unsigned long default_scale_freq_power(struct sched_domain
*sd
, int cpu
)
3681 return SCHED_POWER_SCALE
;
3684 unsigned long __weak
arch_scale_freq_power(struct sched_domain
*sd
, int cpu
)
3686 return default_scale_freq_power(sd
, cpu
);
3689 unsigned long default_scale_smt_power(struct sched_domain
*sd
, int cpu
)
3691 unsigned long weight
= sd
->span_weight
;
3692 unsigned long smt_gain
= sd
->smt_gain
;
3699 unsigned long __weak
arch_scale_smt_power(struct sched_domain
*sd
, int cpu
)
3701 return default_scale_smt_power(sd
, cpu
);
3704 unsigned long scale_rt_power(int cpu
)
3706 struct rq
*rq
= cpu_rq(cpu
);
3707 u64 total
, available
;
3709 total
= sched_avg_period() + (rq
->clock
- rq
->age_stamp
);
3711 if (unlikely(total
< rq
->rt_avg
)) {
3712 /* Ensures that power won't end up being negative */
3715 available
= total
- rq
->rt_avg
;
3718 if (unlikely((s64
)total
< SCHED_POWER_SCALE
))
3719 total
= SCHED_POWER_SCALE
;
3721 total
>>= SCHED_POWER_SHIFT
;
3723 return div_u64(available
, total
);
3726 static void update_cpu_power(struct sched_domain
*sd
, int cpu
)
3728 unsigned long weight
= sd
->span_weight
;
3729 unsigned long power
= SCHED_POWER_SCALE
;
3730 struct sched_group
*sdg
= sd
->groups
;
3732 if ((sd
->flags
& SD_SHARE_CPUPOWER
) && weight
> 1) {
3733 if (sched_feat(ARCH_POWER
))
3734 power
*= arch_scale_smt_power(sd
, cpu
);
3736 power
*= default_scale_smt_power(sd
, cpu
);
3738 power
>>= SCHED_POWER_SHIFT
;
3741 sdg
->sgp
->power_orig
= power
;
3743 if (sched_feat(ARCH_POWER
))
3744 power
*= arch_scale_freq_power(sd
, cpu
);
3746 power
*= default_scale_freq_power(sd
, cpu
);
3748 power
>>= SCHED_POWER_SHIFT
;
3750 power
*= scale_rt_power(cpu
);
3751 power
>>= SCHED_POWER_SHIFT
;
3756 cpu_rq(cpu
)->cpu_power
= power
;
3757 sdg
->sgp
->power
= power
;
3760 void update_group_power(struct sched_domain
*sd
, int cpu
)
3762 struct sched_domain
*child
= sd
->child
;
3763 struct sched_group
*group
, *sdg
= sd
->groups
;
3764 unsigned long power
;
3767 update_cpu_power(sd
, cpu
);
3773 group
= child
->groups
;
3775 power
+= group
->sgp
->power
;
3776 group
= group
->next
;
3777 } while (group
!= child
->groups
);
3779 sdg
->sgp
->power
= power
;
3783 * Try and fix up capacity for tiny siblings, this is needed when
3784 * things like SD_ASYM_PACKING need f_b_g to select another sibling
3785 * which on its own isn't powerful enough.
3787 * See update_sd_pick_busiest() and check_asym_packing().
3790 fix_small_capacity(struct sched_domain
*sd
, struct sched_group
*group
)
3793 * Only siblings can have significantly less than SCHED_POWER_SCALE
3795 if (!(sd
->flags
& SD_SHARE_CPUPOWER
))
3799 * If ~90% of the cpu_power is still there, we're good.
3801 if (group
->sgp
->power
* 32 > group
->sgp
->power_orig
* 29)
3808 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3809 * @sd: The sched_domain whose statistics are to be updated.
3810 * @group: sched_group whose statistics are to be updated.
3811 * @this_cpu: Cpu for which load balance is currently performed.
3812 * @idle: Idle status of this_cpu
3813 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3814 * @local_group: Does group contain this_cpu.
3815 * @cpus: Set of cpus considered for load balancing.
3816 * @balance: Should we balance.
3817 * @sgs: variable to hold the statistics for this group.
3819 static inline void update_sg_lb_stats(struct sched_domain
*sd
,
3820 struct sched_group
*group
, int this_cpu
,
3821 enum cpu_idle_type idle
, int load_idx
,
3822 int local_group
, const struct cpumask
*cpus
,
3823 int *balance
, struct sg_lb_stats
*sgs
)
3825 unsigned long load
, max_cpu_load
, min_cpu_load
, max_nr_running
;
3827 unsigned int balance_cpu
= -1, first_idle_cpu
= 0;
3828 unsigned long avg_load_per_task
= 0;
3831 balance_cpu
= group_first_cpu(group
);
3833 /* Tally up the load of all CPUs in the group */
3835 min_cpu_load
= ~0UL;
3838 for_each_cpu_and(i
, sched_group_cpus(group
), cpus
) {
3839 struct rq
*rq
= cpu_rq(i
);
3841 /* Bias balancing toward cpus of our domain */
3843 if (idle_cpu(i
) && !first_idle_cpu
) {
3848 load
= target_load(i
, load_idx
);
3850 load
= source_load(i
, load_idx
);
3851 if (load
> max_cpu_load
) {
3852 max_cpu_load
= load
;
3853 max_nr_running
= rq
->nr_running
;
3855 if (min_cpu_load
> load
)
3856 min_cpu_load
= load
;
3859 sgs
->group_load
+= load
;
3860 sgs
->sum_nr_running
+= rq
->nr_running
;
3861 sgs
->sum_weighted_load
+= weighted_cpuload(i
);
3867 * First idle cpu or the first cpu(busiest) in this sched group
3868 * is eligible for doing load balancing at this and above
3869 * domains. In the newly idle case, we will allow all the cpu's
3870 * to do the newly idle load balance.
3872 if (idle
!= CPU_NEWLY_IDLE
&& local_group
) {
3873 if (balance_cpu
!= this_cpu
) {
3877 update_group_power(sd
, this_cpu
);
3880 /* Adjust by relative CPU power of the group */
3881 sgs
->avg_load
= (sgs
->group_load
*SCHED_POWER_SCALE
) / group
->sgp
->power
;
3884 * Consider the group unbalanced when the imbalance is larger
3885 * than the average weight of a task.
3887 * APZ: with cgroup the avg task weight can vary wildly and
3888 * might not be a suitable number - should we keep a
3889 * normalized nr_running number somewhere that negates
3892 if (sgs
->sum_nr_running
)
3893 avg_load_per_task
= sgs
->sum_weighted_load
/ sgs
->sum_nr_running
;
3895 if ((max_cpu_load
- min_cpu_load
) >= avg_load_per_task
&& max_nr_running
> 1)
3898 sgs
->group_capacity
= DIV_ROUND_CLOSEST(group
->sgp
->power
,
3900 if (!sgs
->group_capacity
)
3901 sgs
->group_capacity
= fix_small_capacity(sd
, group
);
3902 sgs
->group_weight
= group
->group_weight
;
3904 if (sgs
3904 if (sgs->group_capacity > sgs->sum_nr_running)
3905 sgs->group_has_capacity = 1;
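/*
 * Standalone sketch (not kernel code) of how group_capacity falls out of
 * the cpu power sums above.  SCHED_POWER_SCALE and the rounding macro are
 * redefined locally in simplified form (positive values only), and the
 * power figures are made up (an SMT pair is assumed to report roughly
 * 1.15 * SCHED_POWER_SCALE).
 */
#include <stdio.h>

#define SCHED_POWER_SCALE       1024UL
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        unsigned long power_two_cores = 2 * SCHED_POWER_SCALE; /* 2048 */
        unsigned long power_smt_pair  = 1178;   /* assumed ~1.15 * SCHED_POWER_SCALE */

        printf("capacity(two cores) = %lu\n",
               DIV_ROUND_CLOSEST(power_two_cores, SCHED_POWER_SCALE)); /* 2 */
        printf("capacity(SMT pair)  = %lu\n",
               DIV_ROUND_CLOSEST(power_smt_pair, SCHED_POWER_SCALE));  /* 1 */
        return 0;
}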
3909 * update_sd_pick_busiest - return 1 on busiest group
3910 * @sd: sched_domain whose statistics are to be checked
3911 * @sds: sched_domain statistics
3912 * @sg: sched_group candidate to be checked for being the busiest
3913 * @sgs: sched_group statistics
3914 * @this_cpu: the current cpu
3916 * Determine if @sg is a busier group than the previously selected
3919 static bool update_sd_pick_busiest(struct sched_domain
*sd
,
3920 struct sd_lb_stats
*sds
,
3921 struct sched_group
*sg
,
3922 struct sg_lb_stats
*sgs
,
3925 if (sgs
->avg_load
<= sds
->max_load
)
3928 if (sgs
->sum_nr_running
> sgs
->group_capacity
)
3935 * ASYM_PACKING needs to move all the work to the lowest
3936 * numbered CPUs in the group, therefore mark all groups
3937 * higher than ourself as busy.
3939 if ((sd
->flags
& SD_ASYM_PACKING
) && sgs
->sum_nr_running
&&
3940 this_cpu
< group_first_cpu(sg
)) {
3944 if (group_first_cpu(sds
->busiest
) > group_first_cpu(sg
))
3952 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
3953 * @sd: sched_domain whose statistics are to be updated.
3954 * @this_cpu: Cpu for which load balance is currently performed.
3955 * @idle: Idle status of this_cpu
3956 * @cpus: Set of cpus considered for load balancing.
3957 * @balance: Should we balance.
3958 * @sds: variable to hold the statistics for this sched_domain.
3960 static inline void update_sd_lb_stats(struct sched_domain
*sd
, int this_cpu
,
3961 enum cpu_idle_type idle
, const struct cpumask
*cpus
,
3962 int *balance
, struct sd_lb_stats
*sds
)
3964 struct sched_domain
*child
= sd
->child
;
3965 struct sched_group
*sg
= sd
->groups
;
3966 struct sg_lb_stats sgs
;
3967 int load_idx
, prefer_sibling
= 0;
3969 if (child
&& child
->flags
& SD_PREFER_SIBLING
)
3972 init_sd_power_savings_stats(sd
, sds
, idle
);
3973 load_idx
= get_sd_load_idx(sd
, idle
);
3978 local_group
= cpumask_test_cpu(this_cpu
, sched_group_cpus(sg
));
3979 memset(&sgs
, 0, sizeof(sgs
));
3980 update_sg_lb_stats(sd
, sg
, this_cpu
, idle
, load_idx
,
3981 local_group
, cpus
, balance
, &sgs
);
3983 if (local_group
&& !(*balance
))
3986 sds
->total_load
+= sgs
.group_load
;
3987 sds
->total_pwr
+= sg
->sgp
->power
;
3990 * In case the child domain prefers tasks go to siblings
3991 * first, lower the sg capacity to one so that we'll try
3992 * and move all the excess tasks away. We lower the capacity
3993 * of a group only if the local group has the capacity to fit
3994 * these excess tasks, i.e. nr_running < group_capacity. The
3995 * extra check prevents the case where you always pull from the
3996 * heaviest group when it is already under-utilized (possible
3997 * when a single large-weight task outweighs the rest of the tasks on the system).
3999 if (prefer_sibling
&& !local_group
&& sds
->this_has_capacity
)
4000 sgs
.group_capacity
= min(sgs
.group_capacity
, 1UL);
4003 sds
->this_load
= sgs
.avg_load
;
4005 sds
->this_nr_running
= sgs
.sum_nr_running
;
4006 sds
->this_load_per_task
= sgs
.sum_weighted_load
;
4007 sds
->this_has_capacity
= sgs
.group_has_capacity
;
4008 sds
->this_idle_cpus
= sgs
.idle_cpus
;
4009 } else if (update_sd_pick_busiest(sd
, sds
, sg
, &sgs
, this_cpu
)) {
4010 sds
->max_load
= sgs
.avg_load
;
4012 sds
->busiest_nr_running
= sgs
.sum_nr_running
;
4013 sds
->busiest_idle_cpus
= sgs
.idle_cpus
;
4014 sds
->busiest_group_capacity
= sgs
.group_capacity
;
4015 sds
->busiest_load_per_task
= sgs
.sum_weighted_load
;
4016 sds
->busiest_has_capacity
= sgs
.group_has_capacity
;
4017 sds
->busiest_group_weight
= sgs
.group_weight
;
4018 sds
->group_imb
= sgs
.group_imb
;
4021 update_sd_power_savings_stats(sg
, sds
, local_group
, &sgs
);
4023 } while (sg
!= sd
->groups
);
4027 * check_asym_packing - Check to see if the group is packed into the
4030 * This is primarily intended to be used at the sibling level. Some
4031 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4032 * case of POWER7, it can move to lower SMT modes only when higher
4033 * threads are idle. When in lower SMT modes, the threads will
4034 * perform better since they share less core resources. Hence when we
4035 * have idle threads, we want them to be the higher ones.
4037 * This packing function is run on idle threads. It checks to see if
4038 * the busiest CPU in this domain (core in the P7 case) has a higher
4039 * CPU number than the packing function is being run on. Here we are
4040 * assuming lower CPU number will be equivalent to lower a SMT thread
4043 * Returns 1 when packing is required and a task should be moved to
4044 * this CPU. The amount of the imbalance is returned in *imbalance.
4046 * @sd: The sched_domain whose packing is to be checked.
4047 * @sds: Statistics of the sched_domain which is to be packed
4048 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
4049 * @imbalance: returns amount of imbalanced due to packing.
4051 static int check_asym_packing(struct sched_domain
*sd
,
4052 struct sd_lb_stats
*sds
,
4053 int this_cpu
, unsigned long *imbalance
)
4057 if (!(sd
->flags
& SD_ASYM_PACKING
))
4063 busiest_cpu
= group_first_cpu(sds
->busiest
);
4064 if (this_cpu
> busiest_cpu
)
4067 *imbalance
= DIV_ROUND_CLOSEST(sds
->max_load
* sds
->busiest
->sgp
->power
,
4073 * fix_small_imbalance - Calculate the minor imbalance that exists
4074 * amongst the groups of a sched_domain, during
4076 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
4077 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
4078 * @imbalance: Variable to store the imbalance.
4080 static inline void fix_small_imbalance(struct sd_lb_stats
*sds
,
4081 int this_cpu
, unsigned long *imbalance
)
4083 unsigned long tmp
, pwr_now
= 0, pwr_move
= 0;
4084 unsigned int imbn
= 2;
4085 unsigned long scaled_busy_load_per_task
;
4087 if (sds
->this_nr_running
) {
4088 sds
->this_load_per_task
/= sds
->this_nr_running
;
4089 if (sds
->busiest_load_per_task
>
4090 sds
->this_load_per_task
)
4093 sds
->this_load_per_task
=
4094 cpu_avg_load_per_task(this_cpu
);
4096 scaled_busy_load_per_task
= sds
->busiest_load_per_task
4097 * SCHED_POWER_SCALE
;
4098 scaled_busy_load_per_task
/= sds
->busiest
->sgp
->power
;
4100 if (sds
->max_load
- sds
->this_load
+ scaled_busy_load_per_task
>=
4101 (scaled_busy_load_per_task
* imbn
)) {
4102 *imbalance
= sds
->busiest_load_per_task
;
4107 * OK, we don't have enough imbalance to justify moving tasks,
4108 * however we may be able to increase total CPU power used by
4112 pwr_now
+= sds
->busiest
->sgp
->power
*
4113 min(sds
->busiest_load_per_task
, sds
->max_load
);
4114 pwr_now
+= sds
->this->sgp
->power
*
4115 min(sds
->this_load_per_task
, sds
->this_load
);
4116 pwr_now
/= SCHED_POWER_SCALE
;
4118 /* Amount of load we'd subtract */
4119 tmp
= (sds
->busiest_load_per_task
* SCHED_POWER_SCALE
) /
4120 sds
->busiest
->sgp
->power
;
4121 if (sds
->max_load
> tmp
)
4122 pwr_move
+= sds
->busiest
->sgp
->power
*
4123 min(sds
->busiest_load_per_task
, sds
->max_load
- tmp
);
4125 /* Amount of load we'd add */
4126 if (sds
->max_load
* sds
->busiest
->sgp
->power
<
4127 sds
->busiest_load_per_task
* SCHED_POWER_SCALE
)
4128 tmp
= (sds
->max_load
* sds
->busiest
->sgp
->power
) /
4129 sds
->this->sgp
->power
;
4131 tmp
= (sds
->busiest_load_per_task
* SCHED_POWER_SCALE
) /
4132 sds
->this->sgp
->power
;
4133 pwr_move
+= sds
->this->sgp
->power
*
4134 min(sds
->this_load_per_task
, sds
->this_load
+ tmp
);
4135 pwr_move
/= SCHED_POWER_SCALE
;
4137 /* Move if we gain throughput */
4138 if (pwr_move
> pwr_now
)
4139 *imbalance
= sds
->busiest_load_per_task
;
4143 * calculate_imbalance - Calculate the amount of imbalance present within the
4144 * groups of a given sched_domain during load balance.
4145 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
4146 * @this_cpu: Cpu for which currently load balance is being performed.
4147 * @imbalance: The variable to store the imbalance.
4149 static inline void calculate_imbalance(struct sd_lb_stats
*sds
, int this_cpu
,
4150 unsigned long *imbalance
)
4152 unsigned long max_pull
, load_above_capacity
= ~0UL;
4154 sds
->busiest_load_per_task
/= sds
->busiest_nr_running
;
4155 if (sds
->group_imb
) {
4156 sds
->busiest_load_per_task
=
4157 min(sds
->busiest_load_per_task
, sds
->avg_load
);
4161 * In the presence of smp nice balancing, certain scenarios can have
4162 * max load less than avg load(as we skip the groups at or below
4163 * its cpu_power, while calculating max_load..)
4165 if (sds
->max_load
< sds
->avg_load
) {
4167 return fix_small_imbalance(sds
, this_cpu
, imbalance
);
4170 if (!sds
->group_imb
) {
4172 * Don't want to pull so many tasks that a group would go idle.
4174 load_above_capacity
= (sds
->busiest_nr_running
-
4175 sds
->busiest_group_capacity
);
4177 load_above_capacity
*= (SCHED_LOAD_SCALE
* SCHED_POWER_SCALE
);
4179 load_above_capacity
/= sds
->busiest
->sgp
->power
;
4183 * We're trying to get all the cpus to the average_load, so we don't
4184 * want to push ourselves above the average load, nor do we wish to
4185 * reduce the max loaded cpu below the average load. At the same time,
4186 * we also don't want to reduce the group load below the group capacity
4187 * (so that we can implement power-savings policies etc). Thus we look
4188 * for the minimum possible imbalance.
4189 * Be careful of negative numbers as they'll appear as very large values
4190 * with unsigned longs.
4192 max_pull
= min(sds
->max_load
- sds
->avg_load
, load_above_capacity
);
4194 /* How much load to actually move to equalise the imbalance */
4195 *imbalance
= min(max_pull
* sds
->busiest
->sgp
->power
,
4196 (sds
->avg_load
- sds
->this_load
) * sds
->this->sgp
->power
)
4197 / SCHED_POWER_SCALE
;
4200 * if *imbalance is less than the average load per runnable task
4201 * there is no guarantee that any tasks will be moved so we'll have
4202 * a think about bumping its value to force at least one task to be
4205 if (*imbalance
< sds
->busiest_load_per_task
)
4206 return fix_small_imbalance(sds
, this_cpu
, imbalance
);
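/*
 * Standalone numeric sketch (not kernel code) of the imbalance formula
 * above: pull at most min(max_load - avg_load, load_above_capacity), and
 * never more than what would push the local group past the average.  All
 * loads and powers are made-up values in SCHED_POWER_SCALE units.
 */
#include <stdio.h>

#define SCHED_POWER_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long max_load = 2048, avg_load = 1024, this_load = 512;
        unsigned long load_above_capacity = 1024;
        unsigned long busiest_power = 1024, this_power = 1024;

        unsigned long max_pull  = min_ul(max_load - avg_load, load_above_capacity);
        unsigned long imbalance = min_ul(max_pull * busiest_power,
                                         (avg_load - this_load) * this_power)
                                  / SCHED_POWER_SCALE;

        printf("imbalance = %lu\n", imbalance);         /* 512: pull up to the average */
        return 0;
}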
4210 /******* find_busiest_group() helpers end here *********************/
4213 * find_busiest_group - Returns the busiest group within the sched_domain
4214 * if there is an imbalance. If there isn't an imbalance, and
4215 * the user has opted for power-savings, it returns a group whose
4216 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4217 * such a group exists.
4219 * Also calculates the amount of weighted load which should be moved
4220 * to restore balance.
4222 * @sd: The sched_domain whose busiest group is to be returned.
4223 * @this_cpu: The cpu for which load balancing is currently being performed.
4224 * @imbalance: Variable which stores amount of weighted load which should
4225 * be moved to restore balance/put a group to idle.
4226 * @idle: The idle status of this_cpu.
4227 * @cpus: The set of CPUs under consideration for load-balancing.
4228 * @balance: Pointer to a variable indicating if this_cpu
4229 * is the appropriate cpu to perform load balancing at this_level.
4231 * Returns: - the busiest group if imbalance exists.
4232 * - If no imbalance and user has opted for power-savings balance,
4233 * return the least loaded group whose CPUs can be
4234 * put to idle by rebalancing its tasks onto our group.
4236 static struct sched_group
*
4237 find_busiest_group(struct sched_domain
*sd
, int this_cpu
,
4238 unsigned long *imbalance
, enum cpu_idle_type idle
,
4239 const struct cpumask
*cpus
, int *balance
)
4241 struct sd_lb_stats sds
;
4243 memset(&sds
, 0, sizeof(sds
));
4246 * Compute the various statistics relevant for load balancing at
4249 update_sd_lb_stats(sd
, this_cpu
, idle
, cpus
, balance
, &sds
);
4252 * this_cpu is not the appropriate cpu to perform load balancing at
4258 if ((idle
== CPU_IDLE
|| idle
== CPU_NEWLY_IDLE
) &&
4259 check_asym_packing(sd
, &sds
, this_cpu
, imbalance
))
4262 /* There is no busy sibling group to pull tasks from */
4263 if (!sds
.busiest
|| sds
.busiest_nr_running
== 0)
4266 sds
.avg_load
= (SCHED_POWER_SCALE
* sds
.total_load
) / sds
.total_pwr
;
4269 * If the busiest group is imbalanced the below checks don't
4270 * work because they assume all things are equal, which typically
4271 * isn't true due to cpus_allowed constraints and the like.
4276 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4277 if (idle
== CPU_NEWLY_IDLE
&& sds
.this_has_capacity
&&
4278 !sds
.busiest_has_capacity
)
4282 * If the local group is more busy than the selected busiest group
4283 * don't try and pull any tasks.
4285 if (sds
.this_load
>= sds
.max_load
)
4289 * Don't pull any tasks if this group is already above the domain
4292 if (sds
.this_load
>= sds
.avg_load
)
4295 if (idle
== CPU_IDLE
) {
4297 * This cpu is idle. If the busiest group load doesn't
4298 * have more tasks than the number of available cpu's and
4299 * there is no imbalance between this and busiest group
4300 * wrt idle cpus, it is balanced.
4302 if ((sds
.this_idle_cpus
<= sds
.busiest_idle_cpus
+ 1) &&
4303 sds
.busiest_nr_running
<= sds
.busiest_group_weight
)
4307 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4308 * imbalance_pct to be conservative.
4310 if (100 * sds
.max_load
<= sd
->imbalance_pct
* sds
.this_load
)
4315 /* Looks like there is an imbalance. Compute it */
4316 calculate_imbalance(&sds
, this_cpu
, imbalance
);
4321 * There is no obvious imbalance. But check if we can do some balancing
4324 if (check_power_save_busiest_group(&sds
, this_cpu
, imbalance
))
4332 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4335 find_busiest_queue(struct sched_domain
*sd
, struct sched_group
*group
,
4336 enum cpu_idle_type idle
, unsigned long imbalance
,
4337 const struct cpumask
*cpus
)
4339 struct rq
*busiest
= NULL
, *rq
;
4340 unsigned long max_load
= 0;
4343 for_each_cpu(i
, sched_group_cpus(group
)) {
4344 unsigned long power
= power_of(i
);
4345 unsigned long capacity
= DIV_ROUND_CLOSEST(power
,
4350 capacity
= fix_small_capacity(sd
, group
);
4352 if (!cpumask_test_cpu(i
, cpus
))
4356 wl
= weighted_cpuload(i
);
4359 * When comparing with imbalance, use weighted_cpuload()
4360 * which is not scaled with the cpu power.
4362 if (capacity
&& rq
->nr_running
== 1 && wl
> imbalance
)
4366 * For the load comparisons with the other cpu's, consider
4367 * the weighted_cpuload() scaled with the cpu power, so that
4368 * the load can be moved away from the cpu that is potentially
4369 * running at a lower capacity.
4371 wl
= (wl
* SCHED_POWER_SCALE
) / power
;
4373 if (wl
> max_load
) {
4383 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
4384 * so long as it is large enough.
4386 #define MAX_PINNED_INTERVAL 512
4388 /* Working cpumask for load_balance and load_balance_newidle. */
4389 DEFINE_PER_CPU(cpumask_var_t
, load_balance_tmpmask
);
4391 static int need_active_balance(struct sched_domain
*sd
, int idle
,
4392 int busiest_cpu
, int this_cpu
)
4394 if (idle
== CPU_NEWLY_IDLE
) {
4397 * ASYM_PACKING needs to force migrate tasks from busy but
4398 * higher numbered CPUs in order to pack all tasks in the
4399 * lowest numbered CPUs.
4401 if ((sd
->flags
& SD_ASYM_PACKING
) && busiest_cpu
> this_cpu
)
4405 * The only task running in a non-idle cpu can be moved to this
4406 * cpu in an attempt to completely free up the other CPU
4409 * The package power saving logic comes from
4410 * find_busiest_group(). If there is no imbalance, then
4411 * f_b_g() will return NULL. However when sched_mc={1,2} then
4412 * f_b_g() will select a group from which a running task may be
4413 * pulled to this cpu in order to make the other package idle.
4414 * If there is no opportunity to make a package idle and if
4415 * there is no imbalance, then f_b_g() will return NULL and no
4416 * action will be taken in load_balance_newidle().
4418 * Under normal task pull operation due to imbalance, there
4419 * will be more than one task in the source run queue and
4420 * move_tasks() will succeed. ld_moved will be true and this
4421 * active balance code will not be triggered.
4423 if (sched_mc_power_savings
< POWERSAVINGS_BALANCE_WAKEUP
)
4427 return unlikely(sd
->nr_balance_failed
> sd
->cache_nice_tries
+2);
4430 static int active_load_balance_cpu_stop(void *data
);
/*
 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 * tasks if there is an imbalance.
 */
static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *balance)
{
	int ld_moved, all_pinned = 0, active_balance = 0;
	struct sched_group *group;
	unsigned long imbalance;
	struct rq *busiest;
	unsigned long flags;
	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);

	cpumask_copy(cpus, cpu_active_mask);

	schedstat_inc(sd, lb_count[idle]);

redo:
	group = find_busiest_group(sd, this_cpu, &imbalance, idle,
				   cpus, balance);

	if (*balance == 0)
		goto out_balanced;

	if (!group) {
		schedstat_inc(sd, lb_nobusyg[idle]);
		goto out_balanced;
	}

	busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
	if (!busiest) {
		schedstat_inc(sd, lb_nobusyq[idle]);
		goto out_balanced;
	}

	BUG_ON(busiest == this_rq);

	schedstat_add(sd, lb_imbalance[idle], imbalance);

	ld_moved = 0;
	if (busiest->nr_running > 1) {
		/*
		 * Attempt to move tasks. If find_busiest_group has found
		 * an imbalance but busiest->nr_running <= 1, the group is
		 * still unbalanced. ld_moved simply stays zero, so it is
		 * correctly treated as an imbalance.
		 */
		all_pinned = 1;
		local_irq_save(flags);
		double_rq_lock(this_rq, busiest);
		ld_moved = move_tasks(this_rq, this_cpu, busiest,
				      imbalance, sd, idle, &all_pinned);
		double_rq_unlock(this_rq, busiest);
		local_irq_restore(flags);

		/*
		 * some other cpu did the load balance for us.
		 */
		if (ld_moved && this_cpu != smp_processor_id())
			resched_cpu(this_cpu);

		/* All tasks on this runqueue were pinned by CPU affinity */
		if (unlikely(all_pinned)) {
			cpumask_clear_cpu(cpu_of(busiest), cpus);
			if (!cpumask_empty(cpus))
				goto redo;
			goto out_balanced;
		}
	}

	if (!ld_moved) {
		schedstat_inc(sd, lb_failed[idle]);
		/*
		 * Increment the failure counter only on periodic balance.
		 * We do not want newidle balance, which can be very
		 * frequent, pollute the failure counter causing
		 * excessive cache_hot migrations and active balances.
		 */
		if (idle != CPU_NEWLY_IDLE)
			sd->nr_balance_failed++;

		if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
			raw_spin_lock_irqsave(&busiest->lock, flags);

			/* don't kick the active_load_balance_cpu_stop,
			 * if the curr task on busiest cpu can't be
			 * moved to this_cpu
			 */
			if (!cpumask_test_cpu(this_cpu,
					tsk_cpus_allowed(busiest->curr))) {
				raw_spin_unlock_irqrestore(&busiest->lock,
							    flags);
				all_pinned = 1;
				goto out_one_pinned;
			}

			/*
			 * ->active_balance synchronizes accesses to
			 * ->active_balance_work.  Once set, it's cleared
			 * only after active load balance is finished.
			 */
			if (!busiest->active_balance) {
				busiest->active_balance = 1;
				busiest->push_cpu = this_cpu;
				active_balance = 1;
			}
			raw_spin_unlock_irqrestore(&busiest->lock, flags);

			if (active_balance)
				stop_one_cpu_nowait(cpu_of(busiest),
					active_load_balance_cpu_stop, busiest,
					&busiest->active_balance_work);

			/*
			 * We've kicked active balancing, reset the failure
			 * counter.
			 */
			sd->nr_balance_failed = sd->cache_nice_tries+1;
		}
	} else
		sd->nr_balance_failed = 0;

	if (likely(!active_balance)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	} else {
		/*
		 * If we've begun active balancing, start to back off. This
		 * case may not be covered by the all_pinned logic if there
		 * is only 1 task on the busy runqueue (because we don't call
		 * move_tasks).
		 */
		if (sd->balance_interval < sd->max_interval)
			sd->balance_interval *= 2;
	}

	goto out;

out_balanced:
	schedstat_inc(sd, lb_balanced[idle]);

	sd->nr_balance_failed = 0;

out_one_pinned:
	/* tune up the balancing interval */
	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
			(sd->balance_interval < sd->max_interval))
		sd->balance_interval *= 2;

	ld_moved = 0;
out:
	return ld_moved;
}
/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */
void idle_balance(int this_cpu, struct rq *this_rq)
{
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;

	this_rq->idle_stamp = this_rq->clock;

	if (this_rq->avg_idle < sysctl_sched_migration_cost)
		return;

	/*
	 * Drop the rq->lock, but keep IRQ/preempt disabled.
	 */
	raw_spin_unlock(&this_rq->lock);

	update_shares(this_cpu);
	rcu_read_lock();
	for_each_domain(this_cpu, sd) {
		unsigned long interval;
		int balance = 1;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		if (sd->flags & SD_BALANCE_NEWIDLE) {
			/* If we've pulled tasks over stop searching: */
			pulled_task = load_balance(this_cpu, this_rq,
						   sd, CPU_NEWLY_IDLE, &balance);
		}

		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
			next_balance = sd->last_balance + interval;
		if (pulled_task) {
			this_rq->idle_stamp = 0;
			break;
		}
	}
	rcu_read_unlock();

	raw_spin_lock(&this_rq->lock);

	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
		/*
		 * We are going idle. next_balance may be set based on
		 * a busy processor. So reset next_balance.
		 */
		this_rq->next_balance = next_balance;
	}
}
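/*
 * Note (added commentary): the avg_idle cut-off at the top of idle_balance()
 * means that if this CPU's average idle period is already shorter than
 * sysctl_sched_migration_cost, a freshly pulled task would likely not run
 * long enough to repay the migration, so the domain walk is skipped entirely.
 */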
/*
 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
 * least 1 task to be running on each physical CPU where possible, and
 * avoids physical / logical imbalances.
 */
static int active_load_balance_cpu_stop(void *data)
{
	struct rq *busiest_rq = data;
	int busiest_cpu = cpu_of(busiest_rq);
	int target_cpu = busiest_rq->push_cpu;
	struct rq *target_rq = cpu_rq(target_cpu);
	struct sched_domain *sd;

	raw_spin_lock_irq(&busiest_rq->lock);

	/* make sure the requested cpu hasn't gone down in the meantime */
	if (unlikely(busiest_cpu != smp_processor_id() ||
		     !busiest_rq->active_balance))
		goto out_unlock;

	/* Is there any task to move? */
	if (busiest_rq->nr_running <= 1)
		goto out_unlock;

	/*
	 * This condition is "impossible", if it occurs
	 * we need to fix it. Originally reported by
	 * Bjorn Helgaas on a 128-cpu setup.
	 */
	BUG_ON(busiest_rq == target_rq);

	/* move a task from busiest_rq to target_rq */
	double_lock_balance(busiest_rq, target_rq);

	/* Search for an sd spanning us and the target CPU. */
	rcu_read_lock();
	for_each_domain(target_cpu, sd) {
		if ((sd->flags & SD_LOAD_BALANCE) &&
		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
			break;
	}

	if (likely(sd)) {
		schedstat_inc(sd, alb_count);

		if (move_one_task(target_rq, target_cpu, busiest_rq,
				  sd, CPU_IDLE))
			schedstat_inc(sd, alb_pushed);
		else
			schedstat_inc(sd, alb_failed);
	}
	rcu_read_unlock();
	double_unlock_balance(busiest_rq, target_rq);
out_unlock:
	busiest_rq->active_balance = 0;
	raw_spin_unlock_irq(&busiest_rq->lock);
	return 0;
}
#ifdef CONFIG_NO_HZ
/*
 * idle load balancing details
 * - One of the idle CPUs nominates itself as idle load_balancer, while
 *   nominating, it sets the ticks in one of the idle CPUs.
 * - This idle load balancer CPU will also go into tickless mode when
 *   it is idle, just like all other idle CPUs
 * - When one of the busy CPUs notice that there may be an idle rebalancing
 *   needed, they will kick the idle load balancer, which then does idle
 *   load balancing for all the idle CPUs.
 */
static struct {
	atomic_t load_balancer;
	atomic_t first_pick_cpu;
	atomic_t second_pick_cpu;
	cpumask_var_t idle_cpus_mask;
	cpumask_var_t grp_idle_mask;
	unsigned long next_balance;	/* in jiffy units */
} nohz ____cacheline_aligned;

int get_nohz_load_balancer(void)
{
	return atomic_read(&nohz.load_balancer);
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
/**
 * lowest_flag_domain - Return lowest sched_domain containing flag.
 * @cpu:	The cpu whose lowest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the lowest sched_domain
 *		for the given cpu.
 *
 * Returns the lowest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd)
		if (sd->flags & flag)
			break;

	return sd;
}
/**
 * for_each_flag_domain - Iterates over sched_domains containing the flag.
 * @cpu:	The cpu whose domains we're iterating over.
 * @sd:		variable holding the value of the power_savings_sd
 *		for cpu.
 * @flag:	The flag to filter the sched_domains to be iterated.
 *
 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
 * set, starting from the lowest sched_domain to the highest.
 */
#define for_each_flag_domain(cpu, sd, flag) \
	for (sd = lowest_flag_domain(cpu, flag); \
		(sd && (sd->flags & flag)); sd = sd->parent)
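/*
 * Usage sketch (illustrative only, not from the original source):
 *
 *	struct sched_domain *sd;
 *
 *	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE)
 *		scan sd->groups, as find_new_ilb() below does;
 *
 * The walk starts at the lowest domain carrying the flag and stops at the
 * first ancestor that lacks it, so the loop body never sees a domain
 * without the flag.
 */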
/**
 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
 * @ilb_group:	group to be checked for semi-idleness
 *
 * Returns:	1 if the group is semi-idle. 0 otherwise.
 *
 * We define a sched_group to be semi idle if it has atleast one idle-CPU
 * and atleast one non-idle CPU. This helper function checks if the given
 * sched_group is semi-idle or not.
 */
static inline int is_semi_idle_group(struct sched_group *ilb_group)
{
	cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
					sched_group_cpus(ilb_group));

	/*
	 * A sched_group is semi-idle when it has atleast one busy cpu
	 * and atleast one idle cpu.
	 */
	if (cpumask_empty(nohz.grp_idle_mask))
		return 0;

	if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
		return 0;

	return 1;
}
/**
 * find_new_ilb - Finds the optimum idle load balancer for nomination.
 * @cpu:	The cpu which is nominating a new idle_load_balancer.
 *
 * Returns:	Returns the id of the idle load balancer if it exists,
 *		Else, returns >= nr_cpu_ids.
 *
 * This algorithm picks the idle load balancer such that it belongs to a
 * semi-idle powersavings sched_domain. The idea is to try and avoid
 * completely idle packages/cores just for the purpose of idle load balancing
 * when there are other idle cpu's which are better suited for that job.
 */
static int find_new_ilb(int cpu)
{
	struct sched_domain *sd;
	struct sched_group *ilb_group;
	int ilb = nr_cpu_ids;

	/*
	 * Have idle load balancer selection from semi-idle packages only
	 * when power-aware load balancing is enabled
	 */
	if (!(sched_smt_power_savings || sched_mc_power_savings))
		goto out_done;

	/*
	 * Optimize for the case when we have no idle CPUs or only one
	 * idle CPU. Don't walk the sched_domain hierarchy in such cases
	 */
	if (cpumask_weight(nohz.idle_cpus_mask) < 2)
		goto out_done;

	rcu_read_lock();
	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
		ilb_group = sd->groups;

		do {
			if (is_semi_idle_group(ilb_group)) {
				ilb = cpumask_first(nohz.grp_idle_mask);
				goto unlock;
			}

			ilb_group = ilb_group->next;

		} while (ilb_group != sd->groups);
	}
unlock:
	rcu_read_unlock();

out_done:
	return ilb;
}
#else	/*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
static inline int find_new_ilb(int call_cpu)
{
	return nr_cpu_ids;
}
#endif
/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
 * CPU (if there is one).
 */
static void nohz_balancer_kick(int cpu)
{
	int ilb_cpu;

	nohz.next_balance++;

	ilb_cpu = get_nohz_load_balancer();

	if (ilb_cpu >= nr_cpu_ids) {
		ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
		if (ilb_cpu >= nr_cpu_ids)
			return;
	}

	if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
		cpu_rq(ilb_cpu)->nohz_balance_kick = 1;

		smp_mb();
		/*
		 * Use smp_send_reschedule() instead of resched_cpu().
		 * This way we generate a sched IPI on the target cpu which
		 * is idle. And the softirq performing nohz idle load balance
		 * will be run before returning from the IPI.
		 */
		smp_send_reschedule(ilb_cpu);
	}
	return;
}
/*
 * This routine will try to nominate the ilb (idle load balancing)
 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
 * load balancing on behalf of all those cpus.
 *
 * When the ilb owner becomes busy, we will not have new ilb owner until some
 * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
 * idle load balancing by kicking one of the idle CPUs.
 *
 * Ticks are stopped for the ilb owner as well, with busy CPU kicking this
 * ilb owner CPU in future (when there is a need for idle load balancing on
 * behalf of all idle CPUs).
 */
void select_nohz_load_balancer(int stop_tick)
{
	int cpu = smp_processor_id();

	if (stop_tick) {
		if (!cpu_active(cpu)) {
			if (atomic_read(&nohz.load_balancer) != cpu)
				return;

			/*
			 * If we are going offline and still the leader,
			 * give up!
			 */
			if (atomic_cmpxchg(&nohz.load_balancer, cpu,
					   nr_cpu_ids) != cpu)
				BUG();

			return;
		}

		cpumask_set_cpu(cpu, nohz.idle_cpus_mask);

		if (atomic_read(&nohz.first_pick_cpu) == cpu)
			atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
		if (atomic_read(&nohz.second_pick_cpu) == cpu)
			atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);

		if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
			int new_ilb;

			/* make me the ilb owner */
			if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
					   cpu) != nr_cpu_ids)
				return;

			/*
			 * Check to see if there is a more power-efficient
			 * ilb.
			 */
			new_ilb = find_new_ilb(cpu);
			if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
				atomic_set(&nohz.load_balancer, nr_cpu_ids);
				resched_cpu(new_ilb);
				return;
			}
			return;
		}
	} else {
		if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
			return;

		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);

		if (atomic_read(&nohz.load_balancer) == cpu)
			if (atomic_cmpxchg(&nohz.load_balancer, cpu,
					   nr_cpu_ids) != cpu)
				BUG();
	}
	return;
}
#endif
static DEFINE_SPINLOCK(balancing);

static unsigned long __read_mostly max_load_balance_interval = HZ/10;
/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
void update_max_interval(void)
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}
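/*
 * Worked example (added commentary): with HZ=1000, a 16-CPU machine allows
 * balance intervals of up to 1000 * 16 / 10 = 1600 jiffies (1.6s), while a
 * 2-CPU box is capped at 200 jiffies. rebalance_domains() below clamps each
 * domain's interval to this ceiling.
 */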
/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in arch_init_sched_domains.
 */
static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
	int balance = 1;
	struct rq *rq = cpu_rq(cpu);
	unsigned long interval;
	struct sched_domain *sd;
	/* Earliest time when we have to do rebalance again */
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize;

	update_shares(cpu);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		interval = sd->balance_interval;
		if (idle != CPU_IDLE)
			interval *= sd->busy_factor;

		/* scale ms to jiffies */
		interval = msecs_to_jiffies(interval);
		interval = clamp(interval, 1UL, max_load_balance_interval);

		need_serialize = sd->flags & SD_SERIALIZE;

		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &balance)) {
				/*
				 * We've pulled tasks over so either we're no
				 * longer idle.
				 */
				idle = CPU_NOT_IDLE;
			}
			sd->last_balance = jiffies;
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!balance)
			break;
	}
	rcu_read_unlock();

	/*
	 * next_balance will be updated only when there is a need.
	 * When the cpu is attached to null domain for ex, it will not be
	 * updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
}
#ifdef CONFIG_NO_HZ
/*
 * In CONFIG_NO_HZ case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
{
	struct rq *this_rq = cpu_rq(this_cpu);
	struct rq *rq;
	int balance_cpu;

	if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
		return;

	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
		if (balance_cpu == this_cpu)
			continue;

		/*
		 * If this cpu gets work to do, stop the load balancing
		 * work being done for other cpus. Next load
		 * balancing owner will pick it up.
		 */
		if (need_resched()) {
			this_rq->nohz_balance_kick = 0;
			break;
		}

		raw_spin_lock_irq(&this_rq->lock);
		update_rq_clock(this_rq);
		update_cpu_load(this_rq);
		raw_spin_unlock_irq(&this_rq->lock);

		rebalance_domains(balance_cpu, CPU_IDLE);

		rq = cpu_rq(balance_cpu);
		if (time_after(this_rq->next_balance, rq->next_balance))
			this_rq->next_balance = rq->next_balance;
	}
	nohz.next_balance = this_rq->next_balance;
	this_rq->nohz_balance_kick = 0;
}
/*
 * Current heuristic for kicking the idle load balancer
 * - first_pick_cpu is the one of the busy CPUs. It will kick
 *   idle load balancer when it has more than one process active. This
 *   eliminates the need for idle load balancing altogether when we have
 *   only one running process in the system (common case).
 * - If there are more than one busy CPU, idle load balancer may have
 *   to run for active_load_balance to happen (i.e., two busy CPUs are
 *   SMT or core siblings and can run better if they move to different
 *   physical CPUs). So, second_pick_cpu is the second of the busy CPUs
 *   which will kick idle load balancer as soon as it has any load.
 */
static inline int nohz_kick_needed(struct rq *rq, int cpu)
{
	unsigned long now = jiffies;
	int ret;
	int first_pick_cpu, second_pick_cpu;

	if (time_before(now, nohz.next_balance))
		return 0;

	if (idle_cpu(cpu))
		return 0;

	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
	second_pick_cpu = atomic_read(&nohz.second_pick_cpu);

	if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
	    second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
		return 0;

	ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
	if (ret == nr_cpu_ids || ret == cpu) {
		atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
		if (rq->nr_running > 1)
			return 1;
	} else {
		ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
		if (ret == nr_cpu_ids || ret == cpu) {
			if (rq->nr_running)
				return 1;
		}
	}
	return 0;
}
#else
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif
/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct rq *this_rq = cpu_rq(this_cpu);
	enum cpu_idle_type idle = this_rq->idle_balance ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_cpu, idle);

	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
	nohz_idle_balance(this_cpu, idle);
}
static inline int on_null_domain(int cpu)
{
	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
}
/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq, int cpu)
{
	/* Don't need to rebalance while attached to NULL domain */
	if (time_after_eq(jiffies, rq->next_balance) &&
	    likely(!on_null_domain(cpu)))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ
	else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
		nohz_balancer_kick(cpu);
#endif
}
static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();
}

#endif /* CONFIG_SMP */
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}
/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(current);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);

	if (unlikely(task_cpu(p) != this_cpu)) {
		rcu_read_lock();
		__set_task_cpu(p, this_cpu);
		rcu_read_unlock();
	}

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->on_rq)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when its
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime, if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}
}
/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	INIT_LIST_HEAD(&cfs_rq->tasks);
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
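/*
 * Note on the min_vruntime initializer above (added commentary): starting
 * just below the u64 wrap point, -(1LL << 20) read as unsigned, makes
 * vruntime wrap around shortly after boot, so comparisons that forget the
 * wrap-safe helpers show up early rather than only after long uptimes.
 */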
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep, sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq)
		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}
void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}
void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
#ifdef CONFIG_SMP
	/* allow initial update_cfs_load() to truncate */
	cfs_rq->load_stamp = 1;
#endif
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent)
		se->cfs_rq = &rq->cfs;
	else
		se->cfs_rq = parent->my_q;

	se->my_q = cfs_rq;
	update_load_set(&se->load, 0);
	se->parent = parent;
}
static DEFINE_MUTEX(shares_mutex);
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}
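/*
 * Added commentary: this hook backs sched_rr_get_interval(2) for
 * SCHED_NORMAL/SCHED_BATCH tasks. The reported "timeslice" is simply the
 * task's current sched_slice() converted to jiffies, so it shrinks as more
 * tasks share the runqueue and is 0 on an otherwise idle one.
 */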
/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
	alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
	atomic_set(&nohz.load_balancer, nr_cpu_ids);
	atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
	atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
#endif
#endif /* SMP */
}