sched: old sleeper bonus
[deliverable/linux.git] / kernel / sched.c
1 /*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
27 */
28
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/nmi.h>
32 #include <linux/init.h>
33 #include <linux/uaccess.h>
34 #include <linux/highmem.h>
35 #include <linux/smp_lock.h>
36 #include <asm/mmu_context.h>
37 #include <linux/interrupt.h>
38 #include <linux/capability.h>
39 #include <linux/completion.h>
40 #include <linux/kernel_stat.h>
41 #include <linux/debug_locks.h>
42 #include <linux/security.h>
43 #include <linux/notifier.h>
44 #include <linux/profile.h>
45 #include <linux/freezer.h>
46 #include <linux/vmalloc.h>
47 #include <linux/blkdev.h>
48 #include <linux/delay.h>
49 #include <linux/pid_namespace.h>
50 #include <linux/smp.h>
51 #include <linux/threads.h>
52 #include <linux/timer.h>
53 #include <linux/rcupdate.h>
54 #include <linux/cpu.h>
55 #include <linux/cpuset.h>
56 #include <linux/percpu.h>
57 #include <linux/kthread.h>
58 #include <linux/seq_file.h>
59 #include <linux/sysctl.h>
60 #include <linux/syscalls.h>
61 #include <linux/times.h>
62 #include <linux/tsacct_kern.h>
63 #include <linux/kprobes.h>
64 #include <linux/delayacct.h>
65 #include <linux/reciprocal_div.h>
66 #include <linux/unistd.h>
67 #include <linux/pagemap.h>
68 #include <linux/hrtimer.h>
69 #include <linux/tick.h>
70 #include <linux/bootmem.h>
71
72 #include <asm/tlb.h>
73 #include <asm/irq_regs.h>
74
75 /*
76 * Scheduler clock - returns current time in nanosec units.
77 * This is the default implementation.
78 * Architectures and sub-architectures can override this.
79 */
80 unsigned long long __attribute__((weak)) sched_clock(void)
81 {
82 return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
83 }
84
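/*
 * Illustrative note (not part of the original file): with the weak default
 * above, sched_clock() only has jiffy resolution.  Assuming HZ=1000 it
 * advances in 1,000,000 ns (1 ms) steps; with HZ=250 the step is
 * 4,000,000 ns (4 ms).  Architectures with a fine-grained clocksource are
 * expected to override this with something nanosecond-accurate.
 */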
85 /*
86 * Convert user-nice values [ -20 ... 0 ... 19 ]
87 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
88 * and back.
89 */
90 #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
91 #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
92 #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
93
94 /*
95 * 'User priority' is the nice value converted to something we
96 * can work with better when scaling various scheduler parameters,
97 * it's a [ 0 ... 39 ] range.
98 */
99 #define USER_PRIO(p) ((p)-MAX_RT_PRIO)
100 #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
101 #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
102
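/*
 * Worked example (illustrative, assuming the usual MAX_RT_PRIO of 100 and
 * MAX_PRIO of 140):
 *
 *	NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139
 *	USER_PRIO(100) == 0,      USER_PRIO(120) == 20,   USER_PRIO(139) == 39
 *	MAX_USER_PRIO == 40
 *
 * i.e. nice values map onto static priorities 100..139, which in turn map
 * onto the 0..39 'user priority' range used for weight-table lookups below.
 */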
103 /*
104 * Helpers for converting nanosecond timing to jiffy resolution
105 */
106 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
107
108 #define NICE_0_LOAD SCHED_LOAD_SCALE
109 #define NICE_0_SHIFT SCHED_LOAD_SHIFT
110
111 /*
112 * These are the 'tuning knobs' of the scheduler:
113 *
114 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
115 * Timeslices get refilled after they expire.
116 */
117 #define DEF_TIMESLICE (100 * HZ / 1000)
118
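/*
 * Example (illustrative): DEF_TIMESLICE always works out to roughly 100 ms
 * worth of jiffies -- 100 jiffies with HZ=1000, 25 with HZ=250, 10 with
 * HZ=100.  It only matters for SCHED_RR round-robin slicing.
 */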
119 /*
120 * single value that denotes runtime == period, i.e. unlimited time.
121 */
122 #define RUNTIME_INF ((u64)~0ULL)
123
124 #ifdef CONFIG_SMP
125 /*
126 * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
127 * Since cpu_power is a 'constant', we can use a reciprocal divide.
128 */
129 static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
130 {
131 return reciprocal_divide(load, sg->reciprocal_cpu_power);
132 }
133
134 /*
135 * Each time a sched group cpu_power is changed,
136 * we must compute its reciprocal value
137 */
138 static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
139 {
140 sg->__cpu_power += val;
141 sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
142 }
143 #endif
144
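/*
 * Illustrative sketch of the reciprocal-divide trick (values assumed, not
 * taken from the original file): reciprocal_value(d) is roughly 2^32 / d,
 * and reciprocal_divide(a, r) evaluates (u32)(((u64)a * r) >> 32), so a
 * division by sg->__cpu_power becomes a multiply and a shift.  E.g. with
 * __cpu_power == 1024 the stored reciprocal is about 4194304, and dividing
 * a load of 2048 yields (2048ULL * 4194304) >> 32 == 2, as expected.
 */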
145 static inline int rt_policy(int policy)
146 {
147 if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
148 return 1;
149 return 0;
150 }
151
152 static inline int task_has_rt_policy(struct task_struct *p)
153 {
154 return rt_policy(p->policy);
155 }
156
157 /*
158 * This is the priority-queue data structure of the RT scheduling class:
159 */
160 struct rt_prio_array {
161 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
162 struct list_head queue[MAX_RT_PRIO];
163 };
164
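/*
 * Rough sketch (for illustration only) of how the RT class uses this array:
 * the bitmap records which priority levels have queued entities, so picking
 * the next RT entity is O(1), along the lines of
 *
 *	idx = sched_find_first_bit(array->bitmap);
 *	queue = array->queue + idx;
 *	next = list_entry(queue->next, struct sched_rt_entity, run_list);
 *
 * The extra 'delimiter' bit is kept set so the bit search always terminates
 * even when no RT entity is queued.
 */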
165 struct rt_bandwidth {
166 ktime_t rt_period;
167 u64 rt_runtime;
168 spinlock_t rt_runtime_lock;
169 struct hrtimer rt_period_timer;
170 };
171
172 static struct rt_bandwidth def_rt_bandwidth;
173
174 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
175
176 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
177 {
178 struct rt_bandwidth *rt_b =
179 container_of(timer, struct rt_bandwidth, rt_period_timer);
180 ktime_t now;
181 int overrun;
182 int idle = 0;
183
184 for (;;) {
185 now = hrtimer_cb_get_time(timer);
186 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
187
188 if (!overrun)
189 break;
190
191 idle = do_sched_rt_period_timer(rt_b, overrun);
192 }
193
194 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
195 }
196
197 static
198 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
199 {
200 rt_b->rt_period = ns_to_ktime(period);
201 rt_b->rt_runtime = runtime;
202
203 spin_lock_init(&rt_b->rt_runtime_lock);
204
205 hrtimer_init(&rt_b->rt_period_timer,
206 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
207 rt_b->rt_period_timer.function = sched_rt_period_timer;
208 rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
209 }
210
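/*
 * Typical usage (illustrative sketch): at boot the default RT bandwidth is
 * set up roughly as
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 *
 * i.e. with the default sysctls defined further down, RT tasks may consume
 * at most 0.95 s of CPU time in every 1 s period before being throttled.
 */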
211 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
212 {
213 ktime_t now;
214
215 if (rt_b->rt_runtime == RUNTIME_INF)
216 return;
217
218 if (hrtimer_active(&rt_b->rt_period_timer))
219 return;
220
221 spin_lock(&rt_b->rt_runtime_lock);
222 for (;;) {
223 if (hrtimer_active(&rt_b->rt_period_timer))
224 break;
225
226 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
227 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
228 hrtimer_start(&rt_b->rt_period_timer,
229 rt_b->rt_period_timer.expires,
230 HRTIMER_MODE_ABS);
231 }
232 spin_unlock(&rt_b->rt_runtime_lock);
233 }
234
235 #ifdef CONFIG_RT_GROUP_SCHED
236 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
237 {
238 hrtimer_cancel(&rt_b->rt_period_timer);
239 }
240 #endif
241
242 #ifdef CONFIG_GROUP_SCHED
243
244 #include <linux/cgroup.h>
245
246 struct cfs_rq;
247
248 static LIST_HEAD(task_groups);
249
250 /* task group related information */
251 struct task_group {
252 #ifdef CONFIG_CGROUP_SCHED
253 struct cgroup_subsys_state css;
254 #endif
255
256 #ifdef CONFIG_FAIR_GROUP_SCHED
257 /* schedulable entities of this group on each cpu */
258 struct sched_entity **se;
259 /* runqueue "owned" by this group on each cpu */
260 struct cfs_rq **cfs_rq;
261 unsigned long shares;
262 #endif
263
264 #ifdef CONFIG_RT_GROUP_SCHED
265 struct sched_rt_entity **rt_se;
266 struct rt_rq **rt_rq;
267
268 struct rt_bandwidth rt_bandwidth;
269 #endif
270
271 struct rcu_head rcu;
272 struct list_head list;
273 };
274
275 #ifdef CONFIG_FAIR_GROUP_SCHED
276 /* Default task group's sched entity on each cpu */
277 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
278 /* Default task group's cfs_rq on each cpu */
279 static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
280 #endif
281
282 #ifdef CONFIG_RT_GROUP_SCHED
283 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
284 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
285 #endif
286
287 /* task_group_lock serializes add/remove of task groups and also changes to
288 * a task group's cpu shares.
289 */
290 static DEFINE_SPINLOCK(task_group_lock);
291
292 /* doms_cur_mutex serializes access to doms_cur[] array */
293 static DEFINE_MUTEX(doms_cur_mutex);
294
295 #ifdef CONFIG_FAIR_GROUP_SCHED
296 #ifdef CONFIG_USER_SCHED
297 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
298 #else
299 # define INIT_TASK_GROUP_LOAD NICE_0_LOAD
300 #endif
301
302 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
303 #endif
304
305 /* Default task group.
306 * Every task in the system belongs to this group at bootup.
307 */
308 struct task_group init_task_group;
309
310 /* return group to which a task belongs */
311 static inline struct task_group *task_group(struct task_struct *p)
312 {
313 struct task_group *tg;
314
315 #ifdef CONFIG_USER_SCHED
316 tg = p->user->tg;
317 #elif defined(CONFIG_CGROUP_SCHED)
318 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
319 struct task_group, css);
320 #else
321 tg = &init_task_group;
322 #endif
323 return tg;
324 }
325
326 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
327 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
328 {
329 #ifdef CONFIG_FAIR_GROUP_SCHED
330 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
331 p->se.parent = task_group(p)->se[cpu];
332 #endif
333
334 #ifdef CONFIG_RT_GROUP_SCHED
335 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
336 p->rt.parent = task_group(p)->rt_se[cpu];
337 #endif
338 }
339
340 static inline void lock_doms_cur(void)
341 {
342 mutex_lock(&doms_cur_mutex);
343 }
344
345 static inline void unlock_doms_cur(void)
346 {
347 mutex_unlock(&doms_cur_mutex);
348 }
349
350 #else
351
352 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
353 static inline void lock_doms_cur(void) { }
354 static inline void unlock_doms_cur(void) { }
355
356 #endif /* CONFIG_GROUP_SCHED */
357
358 /* CFS-related fields in a runqueue */
359 struct cfs_rq {
360 struct load_weight load;
361 unsigned long nr_running;
362
363 u64 exec_clock;
364 u64 min_vruntime;
365
366 struct rb_root tasks_timeline;
367 struct rb_node *rb_leftmost;
368 struct rb_node *rb_load_balance_curr;
369 /* 'curr' points to the currently running entity on this cfs_rq.
370 * It is set to NULL otherwise (i.e. when no entity is currently running).
371 */
372 struct sched_entity *curr, *next;
373
374 unsigned long nr_spread_over;
375
376 #ifdef CONFIG_FAIR_GROUP_SCHED
377 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
378
379 /*
380 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
381 * a hierarchy). Non-leaf cfs_rqs hold other, higher-level schedulable entities
382 * (like users, containers etc.)
383 *
384 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
385 * list is used during load balance.
386 */
387 struct list_head leaf_cfs_rq_list;
388 struct task_group *tg; /* group that "owns" this runqueue */
389 #endif
390 };
391
392 /* Real-Time classes' related field in a runqueue: */
393 struct rt_rq {
394 struct rt_prio_array active;
395 unsigned long rt_nr_running;
396 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
397 int highest_prio; /* highest queued rt task prio */
398 #endif
399 #ifdef CONFIG_SMP
400 unsigned long rt_nr_migratory;
401 int overloaded;
402 #endif
403 int rt_throttled;
404 u64 rt_time;
405 u64 rt_runtime;
406 spinlock_t rt_runtime_lock;
407
408 #ifdef CONFIG_RT_GROUP_SCHED
409 unsigned long rt_nr_boosted;
410
411 struct rq *rq;
412 struct list_head leaf_rt_rq_list;
413 struct task_group *tg;
414 struct sched_rt_entity *rt_se;
415 #endif
416 };
417
418 #ifdef CONFIG_SMP
419
420 /*
421 * We add the notion of a root-domain which will be used to define per-domain
422 * variables. Each exclusive cpuset essentially defines an island domain by
423 * fully partitioning the member cpus from any other cpuset. Whenever a new
424 * exclusive cpuset is created, we also create and attach a new root-domain
425 * object.
426 *
427 */
428 struct root_domain {
429 atomic_t refcount;
430 cpumask_t span;
431 cpumask_t online;
432
433 /*
434 * The "RT overload" flag: it gets set if a CPU has more than
435 * one runnable RT task.
436 */
437 cpumask_t rto_mask;
438 atomic_t rto_count;
439 };
440
441 /*
442 * By default the system creates a single root-domain with all cpus as
443 * members (mimicking the global state we have today).
444 */
445 static struct root_domain def_root_domain;
446
447 #endif
448
449 /*
450 * This is the main, per-CPU runqueue data structure.
451 *
452 * Locking rule: code that needs to lock multiple runqueues (such as
453 * the load-balancing or thread-migration code) must acquire the locks
454 * in ascending &runqueue order.
455 */
456 struct rq {
457 /* runqueue lock: */
458 spinlock_t lock;
459
460 /*
461 * nr_running and cpu_load should be in the same cacheline because
462 * remote CPUs use both these fields when doing load calculation.
463 */
464 unsigned long nr_running;
465 #define CPU_LOAD_IDX_MAX 5
466 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
467 unsigned char idle_at_tick;
468 #ifdef CONFIG_NO_HZ
469 unsigned long last_tick_seen;
470 unsigned char in_nohz_recently;
471 #endif
472 /* capture load from *all* tasks on this cpu: */
473 struct load_weight load;
474 unsigned long nr_load_updates;
475 u64 nr_switches;
476
477 struct cfs_rq cfs;
478 struct rt_rq rt;
479
480 #ifdef CONFIG_FAIR_GROUP_SCHED
481 /* list of leaf cfs_rq on this cpu: */
482 struct list_head leaf_cfs_rq_list;
483 #endif
484 #ifdef CONFIG_RT_GROUP_SCHED
485 struct list_head leaf_rt_rq_list;
486 #endif
487
488 /*
489 * This is part of a global counter where only the total sum
490 * over all CPUs matters. A task can increase this counter on
491 * one CPU and if it got migrated afterwards it may decrease
492 * it on another CPU. Always updated under the runqueue lock:
493 */
494 unsigned long nr_uninterruptible;
495
496 struct task_struct *curr, *idle;
497 unsigned long next_balance;
498 struct mm_struct *prev_mm;
499
500 u64 clock, prev_clock_raw;
501 s64 clock_max_delta;
502
503 unsigned int clock_warps, clock_overflows, clock_underflows;
504 u64 idle_clock;
505 unsigned int clock_deep_idle_events;
506 u64 tick_timestamp;
507
508 atomic_t nr_iowait;
509
510 #ifdef CONFIG_SMP
511 struct root_domain *rd;
512 struct sched_domain *sd;
513
514 /* For active balancing */
515 int active_balance;
516 int push_cpu;
517 /* cpu of this runqueue: */
518 int cpu;
519
520 struct task_struct *migration_thread;
521 struct list_head migration_queue;
522 #endif
523
524 #ifdef CONFIG_SCHED_HRTICK
525 unsigned long hrtick_flags;
526 ktime_t hrtick_expire;
527 struct hrtimer hrtick_timer;
528 #endif
529
530 #ifdef CONFIG_SCHEDSTATS
531 /* latency stats */
532 struct sched_info rq_sched_info;
533
534 /* sys_sched_yield() stats */
535 unsigned int yld_exp_empty;
536 unsigned int yld_act_empty;
537 unsigned int yld_both_empty;
538 unsigned int yld_count;
539
540 /* schedule() stats */
541 unsigned int sched_switch;
542 unsigned int sched_count;
543 unsigned int sched_goidle;
544
545 /* try_to_wake_up() stats */
546 unsigned int ttwu_count;
547 unsigned int ttwu_local;
548
549 /* BKL stats */
550 unsigned int bkl_count;
551 #endif
552 struct lock_class_key rq_lock_key;
553 };
554
555 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
556
557 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
558 {
559 rq->curr->sched_class->check_preempt_curr(rq, p);
560 }
561
562 static inline int cpu_of(struct rq *rq)
563 {
564 #ifdef CONFIG_SMP
565 return rq->cpu;
566 #else
567 return 0;
568 #endif
569 }
570
571 #ifdef CONFIG_NO_HZ
572 static inline bool nohz_on(int cpu)
573 {
574 return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
575 }
576
577 static inline u64 max_skipped_ticks(struct rq *rq)
578 {
579 return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
580 }
581
582 static inline void update_last_tick_seen(struct rq *rq)
583 {
584 rq->last_tick_seen = jiffies;
585 }
586 #else
587 static inline u64 max_skipped_ticks(struct rq *rq)
588 {
589 return 1;
590 }
591
592 static inline void update_last_tick_seen(struct rq *rq)
593 {
594 }
595 #endif
596
597 /*
598 * Update the per-runqueue clock, as fine-grained as the platform can give
599 * us, but without assuming monotonicity, etc.:
600 */
601 static void __update_rq_clock(struct rq *rq)
602 {
603 u64 prev_raw = rq->prev_clock_raw;
604 u64 now = sched_clock();
605 s64 delta = now - prev_raw;
606 u64 clock = rq->clock;
607
608 #ifdef CONFIG_SCHED_DEBUG
609 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
610 #endif
611 /*
612 * Protect against sched_clock() occasionally going backwards:
613 */
614 if (unlikely(delta < 0)) {
615 clock++;
616 rq->clock_warps++;
617 } else {
618 /*
619 * Catch too large forward jumps too:
620 */
621 u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
622 u64 max_time = rq->tick_timestamp + max_jump;
623
624 if (unlikely(clock + delta > max_time)) {
625 if (clock < max_time)
626 clock = max_time;
627 else
628 clock++;
629 rq->clock_overflows++;
630 } else {
631 if (unlikely(delta > rq->clock_max_delta))
632 rq->clock_max_delta = delta;
633 clock += delta;
634 }
635 }
636
637 rq->prev_clock_raw = now;
638 rq->clock = clock;
639 }
640
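/*
 * Behaviour in a nutshell (illustrative): if sched_clock() steps backwards,
 * rq->clock is only nudged forward by 1 ns and clock_warps is bumped; if it
 * jumps forward by more than max_skipped_ticks() * TICK_NSEC past the last
 * tick, the jump is clamped and clock_overflows is bumped; otherwise the raw
 * delta is applied and clock_max_delta tracks the largest single step seen.
 */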
641 static void update_rq_clock(struct rq *rq)
642 {
643 if (likely(smp_processor_id() == cpu_of(rq)))
644 __update_rq_clock(rq);
645 }
646
647 /*
648 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
649 * See detach_destroy_domains: synchronize_sched for details.
650 *
651 * The domain tree of any CPU may only be accessed from within
652 * preempt-disabled sections.
653 */
654 #define for_each_domain(cpu, __sd) \
655 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
656
657 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
658 #define this_rq() (&__get_cpu_var(runqueues))
659 #define task_rq(p) cpu_rq(task_cpu(p))
660 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
661
662 /*
663 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
664 */
665 #ifdef CONFIG_SCHED_DEBUG
666 # define const_debug __read_mostly
667 #else
668 # define const_debug static const
669 #endif
670
671 /*
672 * Debugging: various feature bits
673 */
674 enum {
675 SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
676 SCHED_FEAT_WAKEUP_PREEMPT = 2,
677 SCHED_FEAT_START_DEBIT = 4,
678 SCHED_FEAT_AFFINE_WAKEUPS = 8,
679 SCHED_FEAT_CACHE_HOT_BUDDY = 16,
680 SCHED_FEAT_SYNC_WAKEUPS = 32,
681 SCHED_FEAT_HRTICK = 64,
682 SCHED_FEAT_DOUBLE_TICK = 128,
683 SCHED_FEAT_NORMALIZED_SLEEPER = 256,
684 };
685
686 const_debug unsigned int sysctl_sched_features =
687 SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
688 SCHED_FEAT_WAKEUP_PREEMPT * 1 |
689 SCHED_FEAT_START_DEBIT * 1 |
690 SCHED_FEAT_AFFINE_WAKEUPS * 1 |
691 SCHED_FEAT_CACHE_HOT_BUDDY * 1 |
692 SCHED_FEAT_SYNC_WAKEUPS * 1 |
693 SCHED_FEAT_HRTICK * 1 |
694 SCHED_FEAT_DOUBLE_TICK * 0 |
695 SCHED_FEAT_NORMALIZED_SLEEPER * 1;
696
697 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
698
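/*
 * Usage note (illustrative): feature tests are a plain mask check, e.g.
 *
 *	if (sched_feat(NEW_FAIR_SLEEPERS))
 *		...apply the sleeper bonus...
 *
 * With CONFIG_SCHED_DEBUG the mask above is __read_mostly and can be changed
 * at run time; without it, const_debug turns it into a compile-time constant.
 */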
699 /*
700 * Number of tasks to iterate in a single balance run.
701 * Limited because this is done with IRQs disabled.
702 */
703 const_debug unsigned int sysctl_sched_nr_migrate = 32;
704
705 /*
706 * period over which we measure -rt task cpu usage in us.
707 * default: 1s
708 */
709 unsigned int sysctl_sched_rt_period = 1000000;
710
711 static __read_mostly int scheduler_running;
712
713 /*
714 * part of the period that we allow rt tasks to run in us.
715 * default: 0.95s
716 */
717 int sysctl_sched_rt_runtime = 950000;
718
719 static inline u64 global_rt_period(void)
720 {
721 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
722 }
723
724 static inline u64 global_rt_runtime(void)
725 {
726 if (sysctl_sched_rt_runtime < 0)
727 return RUNTIME_INF;
728
729 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
730 }
731
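/*
 * With the defaults above (illustrative arithmetic): global_rt_period()
 * returns 1,000,000,000 ns and global_rt_runtime() returns 950,000,000 ns,
 * so realtime tasks are allowed 95% of each one-second period.  Setting
 * sysctl_sched_rt_runtime to -1 yields RUNTIME_INF, i.e. no throttling.
 */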
732 static const unsigned long long time_sync_thresh = 100000;
733
734 static DEFINE_PER_CPU(unsigned long long, time_offset);
735 static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
736
737 /*
738 * Global lock which we take every now and then to synchronize
739 * the CPUs time. This method is not warp-safe, but it's good
740 * enough to synchronize slowly diverging time sources and thus
741 * it's good enough for tracing:
742 */
743 static DEFINE_SPINLOCK(time_sync_lock);
744 static unsigned long long prev_global_time;
745
746 static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
747 {
748 unsigned long flags;
749
750 spin_lock_irqsave(&time_sync_lock, flags);
751
752 if (time < prev_global_time) {
753 per_cpu(time_offset, cpu) += prev_global_time - time;
754 time = prev_global_time;
755 } else {
756 prev_global_time = time;
757 }
758
759 spin_unlock_irqrestore(&time_sync_lock, flags);
760
761 return time;
762 }
763
764 static unsigned long long __cpu_clock(int cpu)
765 {
766 unsigned long long now;
767 unsigned long flags;
768 struct rq *rq;
769
770 /*
771 * Only call sched_clock() if the scheduler has already been
772 * initialized (some code might call cpu_clock() very early):
773 */
774 if (unlikely(!scheduler_running))
775 return 0;
776
777 local_irq_save(flags);
778 rq = cpu_rq(cpu);
779 update_rq_clock(rq);
780 now = rq->clock;
781 local_irq_restore(flags);
782
783 return now;
784 }
785
786 /*
787 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
788 * clock constructed from sched_clock():
789 */
790 unsigned long long cpu_clock(int cpu)
791 {
792 unsigned long long prev_cpu_time, time, delta_time;
793
794 prev_cpu_time = per_cpu(prev_cpu_time, cpu);
795 time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
796 delta_time = time-prev_cpu_time;
797
798 if (unlikely(delta_time > time_sync_thresh))
799 time = __sync_cpu_clock(time, cpu);
800
801 return time;
802 }
803 EXPORT_SYMBOL_GPL(cpu_clock);
804
805 #ifndef prepare_arch_switch
806 # define prepare_arch_switch(next) do { } while (0)
807 #endif
808 #ifndef finish_arch_switch
809 # define finish_arch_switch(prev) do { } while (0)
810 #endif
811
812 static inline int task_current(struct rq *rq, struct task_struct *p)
813 {
814 return rq->curr == p;
815 }
816
817 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
818 static inline int task_running(struct rq *rq, struct task_struct *p)
819 {
820 return task_current(rq, p);
821 }
822
823 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
824 {
825 }
826
827 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
828 {
829 #ifdef CONFIG_DEBUG_SPINLOCK
830 /* this is a valid case when another task releases the spinlock */
831 rq->lock.owner = current;
832 #endif
833 /*
834 * If we are tracking spinlock dependencies then we have to
835 * fix up the runqueue lock - which gets 'carried over' from
836 * prev into current:
837 */
838 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
839
840 spin_unlock_irq(&rq->lock);
841 }
842
843 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
844 static inline int task_running(struct rq *rq, struct task_struct *p)
845 {
846 #ifdef CONFIG_SMP
847 return p->oncpu;
848 #else
849 return task_current(rq, p);
850 #endif
851 }
852
853 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
854 {
855 #ifdef CONFIG_SMP
856 /*
857 * We can optimise this out completely for !SMP, because the
858 * SMP rebalancing from interrupt is the only thing that cares
859 * here.
860 */
861 next->oncpu = 1;
862 #endif
863 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
864 spin_unlock_irq(&rq->lock);
865 #else
866 spin_unlock(&rq->lock);
867 #endif
868 }
869
870 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
871 {
872 #ifdef CONFIG_SMP
873 /*
874 * After ->oncpu is cleared, the task can be moved to a different CPU.
875 * We must ensure this doesn't happen until the switch is completely
876 * finished.
877 */
878 smp_wmb();
879 prev->oncpu = 0;
880 #endif
881 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
882 local_irq_enable();
883 #endif
884 }
885 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
886
887 /*
888 * __task_rq_lock - lock the runqueue a given task resides on.
889 * Must be called with interrupts disabled.
890 */
891 static inline struct rq *__task_rq_lock(struct task_struct *p)
892 __acquires(rq->lock)
893 {
894 for (;;) {
895 struct rq *rq = task_rq(p);
896 spin_lock(&rq->lock);
897 if (likely(rq == task_rq(p)))
898 return rq;
899 spin_unlock(&rq->lock);
900 }
901 }
902
903 /*
904 * task_rq_lock - lock the runqueue a given task resides on and disable
905 * interrupts. Note the ordering: we can safely lookup the task_rq without
906 * explicitly disabling preemption.
907 */
908 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
909 __acquires(rq->lock)
910 {
911 struct rq *rq;
912
913 for (;;) {
914 local_irq_save(*flags);
915 rq = task_rq(p);
916 spin_lock(&rq->lock);
917 if (likely(rq == task_rq(p)))
918 return rq;
919 spin_unlock_irqrestore(&rq->lock, *flags);
920 }
921 }
922
923 static void __task_rq_unlock(struct rq *rq)
924 __releases(rq->lock)
925 {
926 spin_unlock(&rq->lock);
927 }
928
929 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
930 __releases(rq->lock)
931 {
932 spin_unlock_irqrestore(&rq->lock, *flags);
933 }
934
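/*
 * Typical calling pattern (sketch, not from the original file):
 *
 *	unsigned long flags;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... p cannot change runqueues while rq->lock is held ...
 *	task_rq_unlock(rq, &flags);
 *
 * The retry loops above exist because p may migrate between reading
 * task_rq(p) and acquiring the lock; rechecking under the lock closes
 * that race.
 */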
935 /*
936 * this_rq_lock - lock this runqueue and disable interrupts.
937 */
938 static struct rq *this_rq_lock(void)
939 __acquires(rq->lock)
940 {
941 struct rq *rq;
942
943 local_irq_disable();
944 rq = this_rq();
945 spin_lock(&rq->lock);
946
947 return rq;
948 }
949
950 /*
951 * We are going deep-idle (irqs are disabled):
952 */
953 void sched_clock_idle_sleep_event(void)
954 {
955 struct rq *rq = cpu_rq(smp_processor_id());
956
957 spin_lock(&rq->lock);
958 __update_rq_clock(rq);
959 spin_unlock(&rq->lock);
960 rq->clock_deep_idle_events++;
961 }
962 EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
963
964 /*
965 * We just idled delta nanoseconds (called with irqs disabled):
966 */
967 void sched_clock_idle_wakeup_event(u64 delta_ns)
968 {
969 struct rq *rq = cpu_rq(smp_processor_id());
970 u64 now = sched_clock();
971
972 rq->idle_clock += delta_ns;
973 /*
974 * Override the previous timestamp and ignore all
975 * sched_clock() deltas that occurred while we idled,
976 * and use the PM-provided delta_ns to advance the
977 * rq clock:
978 */
979 spin_lock(&rq->lock);
980 rq->prev_clock_raw = now;
981 rq->clock += delta_ns;
982 spin_unlock(&rq->lock);
983 touch_softlockup_watchdog();
984 }
985 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
986
987 static void __resched_task(struct task_struct *p, int tif_bit);
988
989 static inline void resched_task(struct task_struct *p)
990 {
991 __resched_task(p, TIF_NEED_RESCHED);
992 }
993
994 #ifdef CONFIG_SCHED_HRTICK
995 /*
996 * Use HR-timers to deliver accurate preemption points.
997 *
998 * It's all a bit involved since we cannot program an hrtimer while holding
999 * the rq->lock. So what we do is store state in rq->hrtick_* and ask for a
1000 * reschedule event.
1001 *
1002 * When we get rescheduled we reprogram the hrtick_timer outside of the
1003 * rq->lock.
1004 */
1005 static inline void resched_hrt(struct task_struct *p)
1006 {
1007 __resched_task(p, TIF_HRTICK_RESCHED);
1008 }
1009
1010 static inline void resched_rq(struct rq *rq)
1011 {
1012 unsigned long flags;
1013
1014 spin_lock_irqsave(&rq->lock, flags);
1015 resched_task(rq->curr);
1016 spin_unlock_irqrestore(&rq->lock, flags);
1017 }
1018
1019 enum {
1020 HRTICK_SET, /* re-program hrtick_timer */
1021 HRTICK_RESET, /* not a new slice */
1022 };
1023
1024 /*
1025 * Use hrtick when:
1026 * - enabled by features
1027 * - hrtimer is actually high res
1028 */
1029 static inline int hrtick_enabled(struct rq *rq)
1030 {
1031 if (!sched_feat(HRTICK))
1032 return 0;
1033 return hrtimer_is_hres_active(&rq->hrtick_timer);
1034 }
1035
1036 /*
1037 * Called to set the hrtick timer state.
1038 *
1039 * called with rq->lock held and irqs disabled
1040 */
1041 static void hrtick_start(struct rq *rq, u64 delay, int reset)
1042 {
1043 assert_spin_locked(&rq->lock);
1044
1045 /*
1046 * preempt at: now + delay
1047 */
1048 rq->hrtick_expire =
1049 ktime_add_ns(rq->hrtick_timer.base->get_time(), delay);
1050 /*
1051 * indicate we need to program the timer
1052 */
1053 __set_bit(HRTICK_SET, &rq->hrtick_flags);
1054 if (reset)
1055 __set_bit(HRTICK_RESET, &rq->hrtick_flags);
1056
1057 /*
1058 * New slices are called from the schedule path and don't need a
1059 * forced reschedule.
1060 */
1061 if (reset)
1062 resched_hrt(rq->curr);
1063 }
1064
1065 static void hrtick_clear(struct rq *rq)
1066 {
1067 if (hrtimer_active(&rq->hrtick_timer))
1068 hrtimer_cancel(&rq->hrtick_timer);
1069 }
1070
1071 /*
1072 * Update the timer from the possible pending state.
1073 */
1074 static void hrtick_set(struct rq *rq)
1075 {
1076 ktime_t time;
1077 int set, reset;
1078 unsigned long flags;
1079
1080 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1081
1082 spin_lock_irqsave(&rq->lock, flags);
1083 set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags);
1084 reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags);
1085 time = rq->hrtick_expire;
1086 clear_thread_flag(TIF_HRTICK_RESCHED);
1087 spin_unlock_irqrestore(&rq->lock, flags);
1088
1089 if (set) {
1090 hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS);
1091 if (reset && !hrtimer_active(&rq->hrtick_timer))
1092 resched_rq(rq);
1093 } else
1094 hrtick_clear(rq);
1095 }
1096
1097 /*
1098 * High-resolution timer tick.
1099 * Runs from hardirq context with interrupts disabled.
1100 */
1101 static enum hrtimer_restart hrtick(struct hrtimer *timer)
1102 {
1103 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1104
1105 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1106
1107 spin_lock(&rq->lock);
1108 __update_rq_clock(rq);
1109 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1110 spin_unlock(&rq->lock);
1111
1112 return HRTIMER_NORESTART;
1113 }
1114
1115 static inline void init_rq_hrtick(struct rq *rq)
1116 {
1117 rq->hrtick_flags = 0;
1118 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1119 rq->hrtick_timer.function = hrtick;
1120 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
1121 }
1122
1123 void hrtick_resched(void)
1124 {
1125 struct rq *rq;
1126 unsigned long flags;
1127
1128 if (!test_thread_flag(TIF_HRTICK_RESCHED))
1129 return;
1130
1131 local_irq_save(flags);
1132 rq = cpu_rq(smp_processor_id());
1133 hrtick_set(rq);
1134 local_irq_restore(flags);
1135 }
1136 #else
1137 static inline void hrtick_clear(struct rq *rq)
1138 {
1139 }
1140
1141 static inline void hrtick_set(struct rq *rq)
1142 {
1143 }
1144
1145 static inline void init_rq_hrtick(struct rq *rq)
1146 {
1147 }
1148
1149 void hrtick_resched(void)
1150 {
1151 }
1152 #endif
1153
1154 /*
1155 * resched_task - mark a task 'to be rescheduled now'.
1156 *
1157 * On UP this means the setting of the need_resched flag, on SMP it
1158 * might also involve a cross-CPU call to trigger the scheduler on
1159 * the target CPU.
1160 */
1161 #ifdef CONFIG_SMP
1162
1163 #ifndef tsk_is_polling
1164 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1165 #endif
1166
1167 static void __resched_task(struct task_struct *p, int tif_bit)
1168 {
1169 int cpu;
1170
1171 assert_spin_locked(&task_rq(p)->lock);
1172
1173 if (unlikely(test_tsk_thread_flag(p, tif_bit)))
1174 return;
1175
1176 set_tsk_thread_flag(p, tif_bit);
1177
1178 cpu = task_cpu(p);
1179 if (cpu == smp_processor_id())
1180 return;
1181
1182 /* NEED_RESCHED must be visible before we test polling */
1183 smp_mb();
1184 if (!tsk_is_polling(p))
1185 smp_send_reschedule(cpu);
1186 }
1187
1188 static void resched_cpu(int cpu)
1189 {
1190 struct rq *rq = cpu_rq(cpu);
1191 unsigned long flags;
1192
1193 if (!spin_trylock_irqsave(&rq->lock, flags))
1194 return;
1195 resched_task(cpu_curr(cpu));
1196 spin_unlock_irqrestore(&rq->lock, flags);
1197 }
1198
1199 #ifdef CONFIG_NO_HZ
1200 /*
1201 * When add_timer_on() enqueues a timer into the timer wheel of an
1202 * idle CPU then this timer might expire before the next timer event
1203 * which is scheduled to wake up that CPU. In case of a completely
1204 * idle system the next event might even be infinite time into the
1205 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1206 * leaves the inner idle loop so the newly added timer is taken into
1207 * account when the CPU goes back to idle and evaluates the timer
1208 * wheel for the next timer event.
1209 */
1210 void wake_up_idle_cpu(int cpu)
1211 {
1212 struct rq *rq = cpu_rq(cpu);
1213
1214 if (cpu == smp_processor_id())
1215 return;
1216
1217 /*
1218 * This is safe, as this function is called with the timer
1219 * wheel base lock of (cpu) held. When the CPU is on the way
1220 * to idle and has not yet set rq->curr to idle then it will
1221 * be serialized on the timer wheel base lock and take the new
1222 * timer into account automatically.
1223 */
1224 if (rq->curr != rq->idle)
1225 return;
1226
1227 /*
1228 * We can set TIF_RESCHED on the idle task of the other CPU
1229 * lockless. The worst case is that the other CPU runs the
1230 * idle task through an additional NOOP schedule()
1231 */
1232 set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
1233
1234 /* NEED_RESCHED must be visible before we test polling */
1235 smp_mb();
1236 if (!tsk_is_polling(rq->idle))
1237 smp_send_reschedule(cpu);
1238 }
1239 #endif
1240
1241 #else
1242 static void __resched_task(struct task_struct *p, int tif_bit)
1243 {
1244 assert_spin_locked(&task_rq(p)->lock);
1245 set_tsk_thread_flag(p, tif_bit);
1246 }
1247 #endif
1248
1249 #if BITS_PER_LONG == 32
1250 # define WMULT_CONST (~0UL)
1251 #else
1252 # define WMULT_CONST (1UL << 32)
1253 #endif
1254
1255 #define WMULT_SHIFT 32
1256
1257 /*
1258 * Shift right and round:
1259 */
1260 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
1261
1262 static unsigned long
1263 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1264 struct load_weight *lw)
1265 {
1266 u64 tmp;
1267
1268 if (unlikely(!lw->inv_weight))
1269 lw->inv_weight = (WMULT_CONST-lw->weight/2) / (lw->weight+1);
1270
1271 tmp = (u64)delta_exec * weight;
1272 /*
1273 * Check whether we'd overflow the 64-bit multiplication:
1274 */
1275 if (unlikely(tmp > WMULT_CONST))
1276 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
1277 WMULT_SHIFT/2);
1278 else
1279 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
1280
1281 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
1282 }
1283
1284 static inline unsigned long
1285 calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
1286 {
1287 return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
1288 }
1289
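/*
 * Worked example (illustrative numbers only): calc_delta_mine(delta, w, lw)
 * computes delta * w / lw->weight via the precomputed reciprocal, so
 * calc_delta_mine(1000000, 1024, lw) with lw->weight == 2048 returns roughly
 * 500,000.  calc_delta_fair() is the same thing with w fixed at NICE_0_LOAD,
 * i.e. it scales delta by NICE_0_LOAD / lw->weight.
 */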
1290 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
1291 {
1292 lw->weight += inc;
1293 lw->inv_weight = 0;
1294 }
1295
1296 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
1297 {
1298 lw->weight -= dec;
1299 lw->inv_weight = 0;
1300 }
1301
1302 /*
1303 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1304 * of tasks with abnormal "nice" values across CPUs the contribution that
1305 * each task makes to its run queue's load is weighted according to its
1306 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1307 * scaled version of the new time slice allocation that they receive on time
1308 * slice expiry etc.
1309 */
1310
1311 #define WEIGHT_IDLEPRIO 2
1312 #define WMULT_IDLEPRIO (1 << 31)
1313
1314 /*
1315 * Nice levels are multiplicative, with a gentle 10% change for every
1316 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1317 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1318 * that remained on nice 0.
1319 *
1320 * The "10% effect" is relative and cumulative: from _any_ nice level,
1321 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
1322 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1323 * If a task goes up by ~10% and another task goes down by ~10% then
1324 * the relative distance between them is ~25%.)
1325 */
1326 static const int prio_to_weight[40] = {
1327 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1328 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1329 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1330 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1331 /* 0 */ 1024, 820, 655, 526, 423,
1332 /* 5 */ 335, 272, 215, 172, 137,
1333 /* 10 */ 110, 87, 70, 56, 45,
1334 /* 15 */ 36, 29, 23, 18, 15,
1335 };
1336
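/*
 * Worked example (illustrative): the table index is nice + 20, so nice 0 is
 * prio_to_weight[20] == 1024 and nice 5 is prio_to_weight[25] == 335.  With
 * one nice-0 and one nice-5 CPU hog competing, the shares come out to about
 * 1024/(1024+335) ~= 75% and 25%, i.e. roughly the 1.25^5 ~= 3x ratio the
 * comment above describes.
 */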
1337 /*
1338 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1339 *
1340 * In cases where the weight does not change often, we can use the
1341 * precalculated inverse to speed up arithmetics by turning divisions
1342 * into multiplications:
1343 */
1344 static const u32 prio_to_wmult[40] = {
1345 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1346 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1347 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1348 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1349 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1350 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1351 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1352 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1353 };
1354
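/*
 * Sanity check (illustrative): prio_to_wmult[20] == 4194304 == 2^32 / 1024,
 * matching prio_to_weight[20] == 1024, so the division by a task's weight in
 * calc_delta_mine() becomes a 64-bit multiply by inv_weight followed by a
 * 32-bit right shift.
 */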
1355 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
1356
1357 /*
1358 * runqueue iterator, to support SMP load-balancing between different
1359 * scheduling classes, without having to expose their internal data
1360 * structures to the load-balancing proper:
1361 */
1362 struct rq_iterator {
1363 void *arg;
1364 struct task_struct *(*start)(void *);
1365 struct task_struct *(*next)(void *);
1366 };
1367
1368 #ifdef CONFIG_SMP
1369 static unsigned long
1370 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1371 unsigned long max_load_move, struct sched_domain *sd,
1372 enum cpu_idle_type idle, int *all_pinned,
1373 int *this_best_prio, struct rq_iterator *iterator);
1374
1375 static int
1376 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1377 struct sched_domain *sd, enum cpu_idle_type idle,
1378 struct rq_iterator *iterator);
1379 #endif
1380
1381 #ifdef CONFIG_CGROUP_CPUACCT
1382 static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
1383 #else
1384 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
1385 #endif
1386
1387 #ifdef CONFIG_SMP
1388 static unsigned long source_load(int cpu, int type);
1389 static unsigned long target_load(int cpu, int type);
1390 static unsigned long cpu_avg_load_per_task(int cpu);
1391 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1392 #endif /* CONFIG_SMP */
1393
1394 #include "sched_stats.h"
1395 #include "sched_idletask.c"
1396 #include "sched_fair.c"
1397 #include "sched_rt.c"
1398 #ifdef CONFIG_SCHED_DEBUG
1399 # include "sched_debug.c"
1400 #endif
1401
1402 #define sched_class_highest (&rt_sched_class)
1403
1404 static inline void inc_load(struct rq *rq, const struct task_struct *p)
1405 {
1406 update_load_add(&rq->load, p->se.load.weight);
1407 }
1408
1409 static inline void dec_load(struct rq *rq, const struct task_struct *p)
1410 {
1411 update_load_sub(&rq->load, p->se.load.weight);
1412 }
1413
1414 static void inc_nr_running(struct task_struct *p, struct rq *rq)
1415 {
1416 rq->nr_running++;
1417 inc_load(rq, p);
1418 }
1419
1420 static void dec_nr_running(struct task_struct *p, struct rq *rq)
1421 {
1422 rq->nr_running--;
1423 dec_load(rq, p);
1424 }
1425
1426 static void set_load_weight(struct task_struct *p)
1427 {
1428 if (task_has_rt_policy(p)) {
1429 p->se.load.weight = prio_to_weight[0] * 2;
1430 p->se.load.inv_weight = prio_to_wmult[0] >> 1;
1431 return;
1432 }
1433
1434 /*
1435 * SCHED_IDLE tasks get minimal weight:
1436 */
1437 if (p->policy == SCHED_IDLE) {
1438 p->se.load.weight = WEIGHT_IDLEPRIO;
1439 p->se.load.inv_weight = WMULT_IDLEPRIO;
1440 return;
1441 }
1442
1443 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1444 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
1445 }
1446
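/*
 * Examples (illustrative, values from the tables above): a nice-0
 * SCHED_NORMAL task (static_prio 120) gets weight 1024 / inv_weight 4194304;
 * SCHED_IDLE tasks get the minimal 2 / 2^31; and RT tasks get a fixed
 * prio_to_weight[0] * 2 == 177522, so they dominate any fair-class load.
 */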
1447 static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1448 {
1449 sched_info_queued(p);
1450 p->sched_class->enqueue_task(rq, p, wakeup);
1451 p->se.on_rq = 1;
1452 }
1453
1454 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1455 {
1456 p->sched_class->dequeue_task(rq, p, sleep);
1457 p->se.on_rq = 0;
1458 }
1459
1460 /*
1461 * __normal_prio - return the priority that is based on the static prio
1462 */
1463 static inline int __normal_prio(struct task_struct *p)
1464 {
1465 return p->static_prio;
1466 }
1467
1468 /*
1469 * Calculate the expected normal priority: i.e. priority
1470 * without taking RT-inheritance into account. Might be
1471 * boosted by interactivity modifiers. Changes upon fork,
1472 * setprio syscalls, and whenever the interactivity
1473 * estimator recalculates.
1474 */
1475 static inline int normal_prio(struct task_struct *p)
1476 {
1477 int prio;
1478
1479 if (task_has_rt_policy(p))
1480 prio = MAX_RT_PRIO-1 - p->rt_priority;
1481 else
1482 prio = __normal_prio(p);
1483 return prio;
1484 }
1485
1486 /*
1487 * Calculate the current priority, i.e. the priority
1488 * taken into account by the scheduler. This value might
1489 * be boosted by RT tasks, or might be boosted by
1490 * interactivity modifiers. Will be RT if the task got
1491 * RT-boosted. If not then it returns p->normal_prio.
1492 */
1493 static int effective_prio(struct task_struct *p)
1494 {
1495 p->normal_prio = normal_prio(p);
1496 /*
1497 * If we are RT tasks or we were boosted to RT priority,
1498 * keep the priority unchanged. Otherwise, update priority
1499 * to the normal priority:
1500 */
1501 if (!rt_prio(p->prio))
1502 return p->normal_prio;
1503 return p->prio;
1504 }
1505
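/*
 * Example (illustrative, assuming MAX_RT_PRIO == 100): a SCHED_FIFO task
 * with rt_priority 10 gets normal_prio() == 99 - 10 == 89, while a nice-0
 * SCHED_NORMAL task keeps its static_prio of 120.  effective_prio() only
 * differs when PI boosting has temporarily raised p->prio into the RT range.
 */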
1506 /*
1507 * activate_task - move a task to the runqueue.
1508 */
1509 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
1510 {
1511 if (task_contributes_to_load(p))
1512 rq->nr_uninterruptible--;
1513
1514 enqueue_task(rq, p, wakeup);
1515 inc_nr_running(p, rq);
1516 }
1517
1518 /*
1519 * deactivate_task - remove a task from the runqueue.
1520 */
1521 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1522 {
1523 if (task_contributes_to_load(p))
1524 rq->nr_uninterruptible++;
1525
1526 dequeue_task(rq, p, sleep);
1527 dec_nr_running(p, rq);
1528 }
1529
1530 /**
1531 * task_curr - is this task currently executing on a CPU?
1532 * @p: the task in question.
1533 */
1534 inline int task_curr(const struct task_struct *p)
1535 {
1536 return cpu_curr(task_cpu(p)) == p;
1537 }
1538
1539 /* Used instead of source_load when we know the type == 0 */
1540 unsigned long weighted_cpuload(const int cpu)
1541 {
1542 return cpu_rq(cpu)->load.weight;
1543 }
1544
1545 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1546 {
1547 set_task_rq(p, cpu);
1548 #ifdef CONFIG_SMP
1549 /*
1550 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1551 * successfully executed on another CPU. We must ensure that updates of
1552 * per-task data have been completed by this moment.
1553 */
1554 smp_wmb();
1555 task_thread_info(p)->cpu = cpu;
1556 #endif
1557 }
1558
1559 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1560 const struct sched_class *prev_class,
1561 int oldprio, int running)
1562 {
1563 if (prev_class != p->sched_class) {
1564 if (prev_class->switched_from)
1565 prev_class->switched_from(rq, p, running);
1566 p->sched_class->switched_to(rq, p, running);
1567 } else
1568 p->sched_class->prio_changed(rq, p, oldprio, running);
1569 }
1570
1571 #ifdef CONFIG_SMP
1572
1573 /*
1574 * Is this task likely cache-hot:
1575 */
1576 static int
1577 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
1578 {
1579 s64 delta;
1580
1581 /*
1582 * Buddy candidates are cache hot:
1583 */
1584 if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
1585 return 1;
1586
1587 if (p->sched_class != &fair_sched_class)
1588 return 0;
1589
1590 if (sysctl_sched_migration_cost == -1)
1591 return 1;
1592 if (sysctl_sched_migration_cost == 0)
1593 return 0;
1594
1595 delta = now - p->se.exec_start;
1596
1597 return delta < (s64)sysctl_sched_migration_cost;
1598 }
1599
1600
1601 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1602 {
1603 int old_cpu = task_cpu(p);
1604 struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
1605 struct cfs_rq *old_cfsrq = task_cfs_rq(p),
1606 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
1607 u64 clock_offset;
1608
1609 clock_offset = old_rq->clock - new_rq->clock;
1610
1611 #ifdef CONFIG_SCHEDSTATS
1612 if (p->se.wait_start)
1613 p->se.wait_start -= clock_offset;
1614 if (p->se.sleep_start)
1615 p->se.sleep_start -= clock_offset;
1616 if (p->se.block_start)
1617 p->se.block_start -= clock_offset;
1618 if (old_cpu != new_cpu) {
1619 schedstat_inc(p, se.nr_migrations);
1620 if (task_hot(p, old_rq->clock, NULL))
1621 schedstat_inc(p, se.nr_forced2_migrations);
1622 }
1623 #endif
1624 p->se.vruntime -= old_cfsrq->min_vruntime -
1625 new_cfsrq->min_vruntime;
1626
1627 __set_task_cpu(p, new_cpu);
1628 }
1629
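/*
 * Note on the vruntime adjustment above (illustrative numbers): if the old
 * cfs_rq has min_vruntime 1000 and the task sits at vruntime 1100, moving it
 * to a cfs_rq with min_vruntime 400 rewrites its vruntime to 500 -- still
 * 100 ahead of its queue's minimum, so the task's relative position is
 * preserved across the migration.
 */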
1630 struct migration_req {
1631 struct list_head list;
1632
1633 struct task_struct *task;
1634 int dest_cpu;
1635
1636 struct completion done;
1637 };
1638
1639 /*
1640 * The task's runqueue lock must be held.
1641 * Returns true if you have to wait for migration thread.
1642 */
1643 static int
1644 migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1645 {
1646 struct rq *rq = task_rq(p);
1647
1648 /*
1649 * If the task is not on a runqueue (and not running), then
1650 * it is sufficient to simply update the task's cpu field.
1651 */
1652 if (!p->se.on_rq && !task_running(rq, p)) {
1653 set_task_cpu(p, dest_cpu);
1654 return 0;
1655 }
1656
1657 init_completion(&req->done);
1658 req->task = p;
1659 req->dest_cpu = dest_cpu;
1660 list_add(&req->list, &rq->migration_queue);
1661
1662 return 1;
1663 }
1664
1665 /*
1666 * wait_task_inactive - wait for a thread to unschedule.
1667 *
1668 * The caller must ensure that the task *will* unschedule sometime soon,
1669 * else this function might spin for a *long* time. This function can't
1670 * be called with interrupts off, or it may introduce deadlock with
1671 * smp_call_function() if an IPI is sent by the same process we are
1672 * waiting to become inactive.
1673 */
1674 void wait_task_inactive(struct task_struct *p)
1675 {
1676 unsigned long flags;
1677 int running, on_rq;
1678 struct rq *rq;
1679
1680 for (;;) {
1681 /*
1682 * We do the initial early heuristics without holding
1683 * any task-queue locks at all. We'll only try to get
1684 * the runqueue lock when things look like they will
1685 * work out!
1686 */
1687 rq = task_rq(p);
1688
1689 /*
1690 * If the task is actively running on another CPU
1691 * still, just relax and busy-wait without holding
1692 * any locks.
1693 *
1694 * NOTE! Since we don't hold any locks, it's not
1695 * even sure that "rq" stays as the right runqueue!
1696 * But we don't care, since "task_running()" will
1697 * return false if the runqueue has changed and p
1698 * is actually now running somewhere else!
1699 */
1700 while (task_running(rq, p))
1701 cpu_relax();
1702
1703 /*
1704 * Ok, time to look more closely! We need the rq
1705 * lock now, to be *sure*. If we're wrong, we'll
1706 * just go back and repeat.
1707 */
1708 rq = task_rq_lock(p, &flags);
1709 running = task_running(rq, p);
1710 on_rq = p->se.on_rq;
1711 task_rq_unlock(rq, &flags);
1712
1713 /*
1714 * Was it really running after all now that we
1715 * checked with the proper locks actually held?
1716 *
1717 * Oops. Go back and try again..
1718 */
1719 if (unlikely(running)) {
1720 cpu_relax();
1721 continue;
1722 }
1723
1724 /*
1725 * It's not enough that it's not actively running,
1726 * it must be off the runqueue _entirely_, and not
1727 * preempted!
1728 *
1729 * So if it was still runnable (but just not actively
1730 * running right now), it's preempted, and we should
1731 * yield - it could be a while.
1732 */
1733 if (unlikely(on_rq)) {
1734 schedule_timeout_uninterruptible(1);
1735 continue;
1736 }
1737
1738 /*
1739 * Ahh, all good. It wasn't running, and it wasn't
1740 * runnable, which means that it will never become
1741 * running in the future either. We're all done!
1742 */
1743 break;
1744 }
1745 }
1746
1747 /***
1748 * kick_process - kick a running thread to enter/exit the kernel
1749 * @p: the to-be-kicked thread
1750 *
1751 * Cause a process which is running on another CPU to enter
1752 * kernel-mode, without any delay. (to get signals handled.)
1753 *
1754 * NOTE: this function doesn't have to take the runqueue lock,
1755 * because all it wants to ensure is that the remote task enters
1756 * the kernel. If the IPI races and the task has been migrated
1757 * to another CPU then no harm is done and the purpose has been
1758 * achieved as well.
1759 */
1760 void kick_process(struct task_struct *p)
1761 {
1762 int cpu;
1763
1764 preempt_disable();
1765 cpu = task_cpu(p);
1766 if ((cpu != smp_processor_id()) && task_curr(p))
1767 smp_send_reschedule(cpu);
1768 preempt_enable();
1769 }
1770
1771 /*
1772 * Return a low guess at the load of a migration-source cpu weighted
1773 * according to the scheduling class and "nice" value.
1774 *
1775 * We want to under-estimate the load of migration sources, to
1776 * balance conservatively.
1777 */
1778 static unsigned long source_load(int cpu, int type)
1779 {
1780 struct rq *rq = cpu_rq(cpu);
1781 unsigned long total = weighted_cpuload(cpu);
1782
1783 if (type == 0)
1784 return total;
1785
1786 return min(rq->cpu_load[type-1], total);
1787 }
1788
1789 /*
1790 * Return a high guess at the load of a migration-target cpu weighted
1791 * according to the scheduling class and "nice" value.
1792 */
1793 static unsigned long target_load(int cpu, int type)
1794 {
1795 struct rq *rq = cpu_rq(cpu);
1796 unsigned long total = weighted_cpuload(cpu);
1797
1798 if (type == 0)
1799 return total;
1800
1801 return max(rq->cpu_load[type-1], total);
1802 }
1803
1804 /*
1805 * Return the average load per task on the cpu's run queue
1806 */
1807 static unsigned long cpu_avg_load_per_task(int cpu)
1808 {
1809 struct rq *rq = cpu_rq(cpu);
1810 unsigned long total = weighted_cpuload(cpu);
1811 unsigned long n = rq->nr_running;
1812
1813 return n ? total / n : SCHED_LOAD_SCALE;
1814 }
1815
1816 /*
1817 * find_idlest_group finds and returns the least busy CPU group within the
1818 * domain.
1819 */
1820 static struct sched_group *
1821 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
1822 {
1823 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
1824 unsigned long min_load = ULONG_MAX, this_load = 0;
1825 int load_idx = sd->forkexec_idx;
1826 int imbalance = 100 + (sd->imbalance_pct-100)/2;
1827
1828 do {
1829 unsigned long load, avg_load;
1830 int local_group;
1831 int i;
1832
1833 /* Skip over this group if it has no CPUs allowed */
1834 if (!cpus_intersects(group->cpumask, p->cpus_allowed))
1835 continue;
1836
1837 local_group = cpu_isset(this_cpu, group->cpumask);
1838
1839 /* Tally up the load of all CPUs in the group */
1840 avg_load = 0;
1841
1842 for_each_cpu_mask(i, group->cpumask) {
1843 /* Bias balancing toward cpus of our domain */
1844 if (local_group)
1845 load = source_load(i, load_idx);
1846 else
1847 load = target_load(i, load_idx);
1848
1849 avg_load += load;
1850 }
1851
1852 /* Adjust by relative CPU power of the group */
1853 avg_load = sg_div_cpu_power(group,
1854 avg_load * SCHED_LOAD_SCALE);
1855
1856 if (local_group) {
1857 this_load = avg_load;
1858 this = group;
1859 } else if (avg_load < min_load) {
1860 min_load = avg_load;
1861 idlest = group;
1862 }
1863 } while (group = group->next, group != sd->groups);
1864
1865 if (!idlest || 100*this_load < imbalance*min_load)
1866 return NULL;
1867 return idlest;
1868 }
1869
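/*
 * Example (illustrative): with the common imbalance_pct of 125 the local
 * 'imbalance' threshold is 100 + 25/2 == 112, so an idlest group is reported
 * only when the local group's load exceeds the remote minimum by at least
 * ~12%; otherwise NULL is returned and the caller drops to a lower domain
 * level instead of moving the task.
 */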
1870 /*
1871 * find_idlest_cpu - find the idlest cpu among the cpus in group.
1872 */
1873 static int
1874 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
1875 cpumask_t *tmp)
1876 {
1877 unsigned long load, min_load = ULONG_MAX;
1878 int idlest = -1;
1879 int i;
1880
1881 /* Traverse only the allowed CPUs */
1882 cpus_and(*tmp, group->cpumask, p->cpus_allowed);
1883
1884 for_each_cpu_mask(i, *tmp) {
1885 load = weighted_cpuload(i);
1886
1887 if (load < min_load || (load == min_load && i == this_cpu)) {
1888 min_load = load;
1889 idlest = i;
1890 }
1891 }
1892
1893 return idlest;
1894 }
1895
1896 /*
1897 * sched_balance_self: balance the current task (running on cpu) in domains
1898 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
1899 * SD_BALANCE_EXEC.
1900 *
1901 * Balance, ie. select the least loaded group.
1902 *
1903 * Returns the target CPU number, or the same CPU if no balancing is needed.
1904 *
1905 * preempt must be disabled.
1906 */
1907 static int sched_balance_self(int cpu, int flag)
1908 {
1909 struct task_struct *t = current;
1910 struct sched_domain *tmp, *sd = NULL;
1911
1912 for_each_domain(cpu, tmp) {
1913 /*
1914 * If power savings logic is enabled for a domain, stop there.
1915 */
1916 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1917 break;
1918 if (tmp->flags & flag)
1919 sd = tmp;
1920 }
1921
1922 while (sd) {
1923 cpumask_t span, tmpmask;
1924 struct sched_group *group;
1925 int new_cpu, weight;
1926
1927 if (!(sd->flags & flag)) {
1928 sd = sd->child;
1929 continue;
1930 }
1931
1932 span = sd->span;
1933 group = find_idlest_group(sd, t, cpu);
1934 if (!group) {
1935 sd = sd->child;
1936 continue;
1937 }
1938
1939 new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
1940 if (new_cpu == -1 || new_cpu == cpu) {
1941 /* Now try balancing at a lower domain level of cpu */
1942 sd = sd->child;
1943 continue;
1944 }
1945
1946 /* Now try balancing at a lower domain level of new_cpu */
1947 cpu = new_cpu;
1948 sd = NULL;
1949 weight = cpus_weight(span);
1950 for_each_domain(cpu, tmp) {
1951 if (weight <= cpus_weight(tmp->span))
1952 break;
1953 if (tmp->flags & flag)
1954 sd = tmp;
1955 }
1956 /* while loop will break here if sd == NULL */
1957 }
1958
1959 return cpu;
1960 }
1961
1962 #endif /* CONFIG_SMP */
1963
1964 /***
1965 * try_to_wake_up - wake up a thread
1966 * @p: the to-be-woken-up thread
1967 * @state: the mask of task states that can be woken
1968 * @sync: do a synchronous wakeup?
1969 *
1970 * Put it on the run-queue if it's not already there. The "current"
1971 * thread is always on the run-queue (except when the actual
1972 * re-schedule is in progress), and as such you're allowed to do
1973 * the simpler "current->state = TASK_RUNNING" to mark yourself
1974 * runnable without the overhead of this.
1975 *
1976 * returns failure only if the task is already active.
1977 */
1978 static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1979 {
1980 int cpu, orig_cpu, this_cpu, success = 0;
1981 unsigned long flags;
1982 long old_state;
1983 struct rq *rq;
1984
1985 if (!sched_feat(SYNC_WAKEUPS))
1986 sync = 0;
1987
1988 smp_wmb();
1989 rq = task_rq_lock(p, &flags);
1990 old_state = p->state;
1991 if (!(old_state & state))
1992 goto out;
1993
1994 if (p->se.on_rq)
1995 goto out_running;
1996
1997 cpu = task_cpu(p);
1998 orig_cpu = cpu;
1999 this_cpu = smp_processor_id();
2000
2001 #ifdef CONFIG_SMP
2002 if (unlikely(task_running(rq, p)))
2003 goto out_activate;
2004
2005 cpu = p->sched_class->select_task_rq(p, sync);
2006 if (cpu != orig_cpu) {
2007 set_task_cpu(p, cpu);
2008 task_rq_unlock(rq, &flags);
2009 /* might preempt at this point */
2010 rq = task_rq_lock(p, &flags);
2011 old_state = p->state;
2012 if (!(old_state & state))
2013 goto out;
2014 if (p->se.on_rq)
2015 goto out_running;
2016
2017 this_cpu = smp_processor_id();
2018 cpu = task_cpu(p);
2019 }
2020
2021 #ifdef CONFIG_SCHEDSTATS
2022 schedstat_inc(rq, ttwu_count);
2023 if (cpu == this_cpu)
2024 schedstat_inc(rq, ttwu_local);
2025 else {
2026 struct sched_domain *sd;
2027 for_each_domain(this_cpu, sd) {
2028 if (cpu_isset(cpu, sd->span)) {
2029 schedstat_inc(sd, ttwu_wake_remote);
2030 break;
2031 }
2032 }
2033 }
2034 #endif
2035
2036 out_activate:
2037 #endif /* CONFIG_SMP */
2038 schedstat_inc(p, se.nr_wakeups);
2039 if (sync)
2040 schedstat_inc(p, se.nr_wakeups_sync);
2041 if (orig_cpu != cpu)
2042 schedstat_inc(p, se.nr_wakeups_migrate);
2043 if (cpu == this_cpu)
2044 schedstat_inc(p, se.nr_wakeups_local);
2045 else
2046 schedstat_inc(p, se.nr_wakeups_remote);
2047 update_rq_clock(rq);
2048 activate_task(rq, p, 1);
2049 success = 1;
2050
2051 out_running:
2052 check_preempt_curr(rq, p);
2053
2054 p->state = TASK_RUNNING;
2055 #ifdef CONFIG_SMP
2056 if (p->sched_class->task_wake_up)
2057 p->sched_class->task_wake_up(rq, p);
2058 #endif
2059 out:
2060 task_rq_unlock(rq, &flags);
2061
2062 return success;
2063 }
2064
2065 int wake_up_process(struct task_struct *p)
2066 {
2067 return try_to_wake_up(p, TASK_ALL, 0);
2068 }
2069 EXPORT_SYMBOL(wake_up_process);
2070
2071 int wake_up_state(struct task_struct *p, unsigned int state)
2072 {
2073 return try_to_wake_up(p, state, 0);
2074 }
2075
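/*
 * Illustrative sketch (editorial addition, not part of sched.c): the
 * classic sleep/wake pairing that the wakeup primitives above serve.
 * The waiter marks itself sleeping *before* re-checking the condition,
 * so a concurrent wake_up_process() either finds it still TASK_RUNNING
 * (and the waiter notices 'done' on its own) or finds it sleeping and
 * wakes it. 'done' and 'waiter' are hypothetical names for this example,
 * and 'waiter' is assumed to be published before wake_side() can run.
 */
#if 0	/* example only */
static int done;
static struct task_struct *waiter;

static void wait_side(void)
{
	waiter = current;
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (done)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

static void wake_side(void)
{
	done = 1;
	smp_wmb();		/* make 'done' visible before the wakeup */
	wake_up_process(waiter);
}
#endif
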
2076 /*
2077 * Perform scheduler related setup for a newly forked process p.
2078 * p is forked by current.
2079 *
2080 * __sched_fork() is basic setup used by init_idle() too:
2081 */
2082 static void __sched_fork(struct task_struct *p)
2083 {
2084 p->se.exec_start = 0;
2085 p->se.sum_exec_runtime = 0;
2086 p->se.prev_sum_exec_runtime = 0;
2087 p->se.last_wakeup = 0;
2088 p->se.avg_overlap = 0;
2089
2090 #ifdef CONFIG_SCHEDSTATS
2091 p->se.wait_start = 0;
2092 p->se.sum_sleep_runtime = 0;
2093 p->se.sleep_start = 0;
2094 p->se.block_start = 0;
2095 p->se.sleep_max = 0;
2096 p->se.block_max = 0;
2097 p->se.exec_max = 0;
2098 p->se.slice_max = 0;
2099 p->se.wait_max = 0;
2100 #endif
2101
2102 INIT_LIST_HEAD(&p->rt.run_list);
2103 p->se.on_rq = 0;
2104
2105 #ifdef CONFIG_PREEMPT_NOTIFIERS
2106 INIT_HLIST_HEAD(&p->preempt_notifiers);
2107 #endif
2108
2109 /*
2110 * We mark the process as running here, but have not actually
2111 * inserted it onto the runqueue yet. This guarantees that
2112 * nobody will actually run it, and a signal or other external
2113 * event cannot wake it up and insert it on the runqueue either.
2114 */
2115 p->state = TASK_RUNNING;
2116 }
2117
2118 /*
2119 * fork()/clone()-time setup:
2120 */
2121 void sched_fork(struct task_struct *p, int clone_flags)
2122 {
2123 int cpu = get_cpu();
2124
2125 __sched_fork(p);
2126
2127 #ifdef CONFIG_SMP
2128 cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
2129 #endif
2130 set_task_cpu(p, cpu);
2131
2132 /*
2133 * Make sure we do not leak PI boosting priority to the child:
2134 */
2135 p->prio = current->normal_prio;
2136 if (!rt_prio(p->prio))
2137 p->sched_class = &fair_sched_class;
2138
2139 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2140 if (likely(sched_info_on()))
2141 memset(&p->sched_info, 0, sizeof(p->sched_info));
2142 #endif
2143 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
2144 p->oncpu = 0;
2145 #endif
2146 #ifdef CONFIG_PREEMPT
2147 /* Want to start with kernel preemption disabled. */
2148 task_thread_info(p)->preempt_count = 1;
2149 #endif
2150 put_cpu();
2151 }
2152
2153 /*
2154 * wake_up_new_task - wake up a newly created task for the first time.
2155 *
2156 * This function will do some initial scheduler statistics housekeeping
2157 * that must be done for every newly created context, then puts the task
2158 * on the runqueue and wakes it.
2159 */
2160 void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2161 {
2162 unsigned long flags;
2163 struct rq *rq;
2164
2165 rq = task_rq_lock(p, &flags);
2166 BUG_ON(p->state != TASK_RUNNING);
2167 update_rq_clock(rq);
2168
2169 p->prio = effective_prio(p);
2170
2171 if (!p->sched_class->task_new || !current->se.on_rq) {
2172 activate_task(rq, p, 0);
2173 } else {
2174 /*
2175 * Let the scheduling class do new task startup
2176 * management (if any):
2177 */
2178 p->sched_class->task_new(rq, p);
2179 inc_nr_running(p, rq);
2180 }
2181 check_preempt_curr(rq, p);
2182 #ifdef CONFIG_SMP
2183 if (p->sched_class->task_wake_up)
2184 p->sched_class->task_wake_up(rq, p);
2185 #endif
2186 task_rq_unlock(rq, &flags);
2187 }
2188
2189 #ifdef CONFIG_PREEMPT_NOTIFIERS
2190
2191 /**
2192  * preempt_notifier_register - tell me when current is being preempted and rescheduled
2193 * @notifier: notifier struct to register
2194 */
2195 void preempt_notifier_register(struct preempt_notifier *notifier)
2196 {
2197 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2198 }
2199 EXPORT_SYMBOL_GPL(preempt_notifier_register);
2200
2201 /**
2202 * preempt_notifier_unregister - no longer interested in preemption notifications
2203 * @notifier: notifier struct to unregister
2204 *
2205 * This is safe to call from within a preemption notifier.
2206 */
2207 void preempt_notifier_unregister(struct preempt_notifier *notifier)
2208 {
2209 hlist_del(&notifier->link);
2210 }
2211 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2212
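/*
 * Illustrative sketch (editorial addition): how a subsystem might use the
 * registration calls above to learn when its task is scheduled in and out.
 * Assumes struct preempt_ops and preempt_notifier_init() from
 * <linux/preempt.h>; "my_ctx" and the callback names are hypothetical.
 */
#if 0	/* example only */
struct my_ctx {
	struct preempt_notifier notifier;
};

static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current is about to run again on 'cpu': reload per-task state */
}

static void my_sched_out(struct preempt_notifier *pn,
			 struct task_struct *next)
{
	/* current is being switched out in favour of 'next': save state */
}

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static void my_ctx_attach(struct my_ctx *ctx)
{
	preempt_notifier_init(&ctx->notifier, &my_preempt_ops);
	preempt_notifier_register(&ctx->notifier);
}
#endif
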
2213 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2214 {
2215 struct preempt_notifier *notifier;
2216 struct hlist_node *node;
2217
2218 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2219 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2220 }
2221
2222 static void
2223 fire_sched_out_preempt_notifiers(struct task_struct *curr,
2224 struct task_struct *next)
2225 {
2226 struct preempt_notifier *notifier;
2227 struct hlist_node *node;
2228
2229 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2230 notifier->ops->sched_out(notifier, next);
2231 }
2232
2233 #else
2234
2235 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2236 {
2237 }
2238
2239 static void
2240 fire_sched_out_preempt_notifiers(struct task_struct *curr,
2241 struct task_struct *next)
2242 {
2243 }
2244
2245 #endif
2246
2247 /**
2248 * prepare_task_switch - prepare to switch tasks
2249 * @rq: the runqueue preparing to switch
2250 * @prev: the current task that is being switched out
2251 * @next: the task we are going to switch to.
2252 *
2253 * This is called with the rq lock held and interrupts off. It must
2254 * be paired with a subsequent finish_task_switch after the context
2255 * switch.
2256 *
2257 * prepare_task_switch sets up locking and calls architecture specific
2258 * hooks.
2259 */
2260 static inline void
2261 prepare_task_switch(struct rq *rq, struct task_struct *prev,
2262 struct task_struct *next)
2263 {
2264 fire_sched_out_preempt_notifiers(prev, next);
2265 prepare_lock_switch(rq, next);
2266 prepare_arch_switch(next);
2267 }
2268
2269 /**
2270 * finish_task_switch - clean up after a task-switch
2271 * @rq: runqueue associated with task-switch
2272 * @prev: the thread we just switched away from.
2273 *
2274 * finish_task_switch must be called after the context switch, paired
2275 * with a prepare_task_switch call before the context switch.
2276 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2277 * and do any other architecture-specific cleanup actions.
2278 *
2279 * Note that we may have delayed dropping an mm in context_switch(). If
2280 * so, we finish that here outside of the runqueue lock. (Doing it
2281 * with the lock held can cause deadlocks; see schedule() for
2282 * details.)
2283 */
2284 static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2285 __releases(rq->lock)
2286 {
2287 struct mm_struct *mm = rq->prev_mm;
2288 long prev_state;
2289
2290 rq->prev_mm = NULL;
2291
2292 /*
2293 * A task struct has one reference for the use as "current".
2294 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2295 * schedule one last time. The schedule call will never return, and
2296 * the scheduled task must drop that reference.
2297 * The test for TASK_DEAD must occur while the runqueue locks are
2298 * still held, otherwise prev could be scheduled on another cpu, die
2299 * there before we look at prev->state, and then the reference would
2300 * be dropped twice.
2301 * Manfred Spraul <manfred@colorfullife.com>
2302 */
2303 prev_state = prev->state;
2304 finish_arch_switch(prev);
2305 finish_lock_switch(rq, prev);
2306 #ifdef CONFIG_SMP
2307 if (current->sched_class->post_schedule)
2308 current->sched_class->post_schedule(rq);
2309 #endif
2310
2311 fire_sched_in_preempt_notifiers(current);
2312 if (mm)
2313 mmdrop(mm);
2314 if (unlikely(prev_state == TASK_DEAD)) {
2315 /*
2316 * Remove function-return probe instances associated with this
2317 * task and put them back on the free list.
2318 */
2319 kprobe_flush_task(prev);
2320 put_task_struct(prev);
2321 }
2322 }
2323
2324 /**
2325 * schedule_tail - first thing a freshly forked thread must call.
2326 * @prev: the thread we just switched away from.
2327 */
2328 asmlinkage void schedule_tail(struct task_struct *prev)
2329 __releases(rq->lock)
2330 {
2331 struct rq *rq = this_rq();
2332
2333 finish_task_switch(rq, prev);
2334 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
2335 /* In this case, finish_task_switch does not reenable preemption */
2336 preempt_enable();
2337 #endif
2338 if (current->set_child_tid)
2339 put_user(task_pid_vnr(current), current->set_child_tid);
2340 }
2341
2342 /*
2343 * context_switch - switch to the new MM and the new
2344 * thread's register state.
2345 */
2346 static inline void
2347 context_switch(struct rq *rq, struct task_struct *prev,
2348 struct task_struct *next)
2349 {
2350 struct mm_struct *mm, *oldmm;
2351
2352 prepare_task_switch(rq, prev, next);
2353 mm = next->mm;
2354 oldmm = prev->active_mm;
2355 /*
2356 * For paravirt, this is coupled with an exit in switch_to to
2357 * combine the page table reload and the switch backend into
2358 * one hypercall.
2359 */
2360 arch_enter_lazy_cpu_mode();
2361
2362 if (unlikely(!mm)) {
2363 next->active_mm = oldmm;
2364 atomic_inc(&oldmm->mm_count);
2365 enter_lazy_tlb(oldmm, next);
2366 } else
2367 switch_mm(oldmm, mm, next);
2368
2369 if (unlikely(!prev->mm)) {
2370 prev->active_mm = NULL;
2371 rq->prev_mm = oldmm;
2372 }
2373 /*
2374 	 * The runqueue lock will be released by the next
2375 	 * task (which is an invalid locking op, but in the case
2376 	 * of the scheduler it's an obvious special case), so we
2377 	 * do an early lockdep release here:
2378 */
2379 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
2380 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2381 #endif
2382
2383 /* Here we just switch the register state and the stack. */
2384 switch_to(prev, next, prev);
2385
2386 barrier();
2387 /*
2388 * this_rq must be evaluated again because prev may have moved
2389 * CPUs since it called schedule(), thus the 'rq' on its stack
2390 * frame will be invalid.
2391 */
2392 finish_task_switch(this_rq(), prev);
2393 }
2394
2395 /*
2396 * nr_running, nr_uninterruptible and nr_context_switches:
2397 *
2398 * externally visible scheduler statistics: current number of runnable
2399 * threads, current number of uninterruptible-sleeping threads, total
2400 * number of context switches performed since bootup.
2401 */
2402 unsigned long nr_running(void)
2403 {
2404 unsigned long i, sum = 0;
2405
2406 for_each_online_cpu(i)
2407 sum += cpu_rq(i)->nr_running;
2408
2409 return sum;
2410 }
2411
2412 unsigned long nr_uninterruptible(void)
2413 {
2414 unsigned long i, sum = 0;
2415
2416 for_each_possible_cpu(i)
2417 sum += cpu_rq(i)->nr_uninterruptible;
2418
2419 /*
2420 * Since we read the counters lockless, it might be slightly
2421 * inaccurate. Do not allow it to go below zero though:
2422 */
2423 if (unlikely((long)sum < 0))
2424 sum = 0;
2425
2426 return sum;
2427 }
2428
2429 unsigned long long nr_context_switches(void)
2430 {
2431 int i;
2432 unsigned long long sum = 0;
2433
2434 for_each_possible_cpu(i)
2435 sum += cpu_rq(i)->nr_switches;
2436
2437 return sum;
2438 }
2439
2440 unsigned long nr_iowait(void)
2441 {
2442 unsigned long i, sum = 0;
2443
2444 for_each_possible_cpu(i)
2445 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2446
2447 return sum;
2448 }
2449
2450 unsigned long nr_active(void)
2451 {
2452 unsigned long i, running = 0, uninterruptible = 0;
2453
2454 for_each_online_cpu(i) {
2455 running += cpu_rq(i)->nr_running;
2456 uninterruptible += cpu_rq(i)->nr_uninterruptible;
2457 }
2458
2459 if (unlikely((long)uninterruptible < 0))
2460 uninterruptible = 0;
2461
2462 return running + uninterruptible;
2463 }
2464
2465 /*
2466 * Update rq->cpu_load[] statistics. This function is usually called every
2467 * scheduler tick (TICK_NSEC).
2468 */
2469 static void update_cpu_load(struct rq *this_rq)
2470 {
2471 unsigned long this_load = this_rq->load.weight;
2472 int i, scale;
2473
2474 this_rq->nr_load_updates++;
2475
2476 /* Update our load: */
2477 for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
2478 unsigned long old_load, new_load;
2479
2480 /* scale is effectively 1 << i now, and >> i divides by scale */
2481
2482 old_load = this_rq->cpu_load[i];
2483 new_load = this_load;
2484 /*
2485 * Round up the averaging division if load is increasing. This
2486 * prevents us from getting stuck on 9 if the load is 10, for
2487 * example.
2488 */
2489 if (new_load > old_load)
2490 new_load += scale-1;
2491 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
2492 }
2493 }
2494
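/*
 * Editorial worked example (not in the original source): with
 * scale == 1 << i, each cpu_load[] index is an exponentially decaying
 * average, cpu_load[i] = (old * (scale - 1) + new) / scale. Starting
 * from cpu_load[2] == 0 with a constant this_load of 1024, successive
 * ticks give (with 'new' rounded up by scale - 1 while load is rising):
 * (0*3 + 1027) >> 2 = 256, then 448, 592, 700, ... converging on 1024.
 * Larger indices decay more slowly and so react later to load changes.
 */
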
2495 #ifdef CONFIG_SMP
2496
2497 /*
2498 * double_rq_lock - safely lock two runqueues
2499 *
2500 * Note this does not disable interrupts like task_rq_lock,
2501 * you need to do so manually before calling.
2502 */
2503 static void double_rq_lock(struct rq *rq1, struct rq *rq2)
2504 __acquires(rq1->lock)
2505 __acquires(rq2->lock)
2506 {
2507 BUG_ON(!irqs_disabled());
2508 if (rq1 == rq2) {
2509 spin_lock(&rq1->lock);
2510 __acquire(rq2->lock); /* Fake it out ;) */
2511 } else {
2512 if (rq1 < rq2) {
2513 spin_lock(&rq1->lock);
2514 spin_lock(&rq2->lock);
2515 } else {
2516 spin_lock(&rq2->lock);
2517 spin_lock(&rq1->lock);
2518 }
2519 }
2520 update_rq_clock(rq1);
2521 update_rq_clock(rq2);
2522 }
2523
2524 /*
2525 * double_rq_unlock - safely unlock two runqueues
2526 *
2527 * Note this does not restore interrupts like task_rq_unlock,
2528 * you need to do so manually after calling.
2529 */
2530 static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2531 __releases(rq1->lock)
2532 __releases(rq2->lock)
2533 {
2534 spin_unlock(&rq1->lock);
2535 if (rq1 != rq2)
2536 spin_unlock(&rq2->lock);
2537 else
2538 __release(rq2->lock);
2539 }
2540
2541 /*
2542 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2543 */
2544 static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2545 __releases(this_rq->lock)
2546 __acquires(busiest->lock)
2547 __acquires(this_rq->lock)
2548 {
2549 int ret = 0;
2550
2551 if (unlikely(!irqs_disabled())) {
2552 		/* printk() doesn't work well under rq->lock */
2553 spin_unlock(&this_rq->lock);
2554 BUG_ON(1);
2555 }
2556 if (unlikely(!spin_trylock(&busiest->lock))) {
2557 if (busiest < this_rq) {
2558 spin_unlock(&this_rq->lock);
2559 spin_lock(&busiest->lock);
2560 spin_lock(&this_rq->lock);
2561 ret = 1;
2562 } else
2563 spin_lock(&busiest->lock);
2564 }
2565 return ret;
2566 }
2567
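/*
 * Illustrative sketch (editorial addition): the "always lock in a fixed
 * global order" idiom behind double_rq_lock()/double_lock_balance() above,
 * shown with plain pthread mutexes in user space. Because both threads
 * order the acquisitions by address, neither can hold one lock of a pair
 * while waiting for the other, which rules out the ABBA deadlock.
 */
#if 0	/* example only, user-space */
#include <pthread.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
	} else if (a < b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}
#endif
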
2568 /*
2569 * If dest_cpu is allowed for this process, migrate the task to it.
2570  * This is accomplished by queueing a migration request on the task's
2571  * runqueue and waking that runqueue's migration thread, then waiting
2572  * for the migration to complete.
2573 */
2574 static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2575 {
2576 struct migration_req req;
2577 unsigned long flags;
2578 struct rq *rq;
2579
2580 rq = task_rq_lock(p, &flags);
2581 if (!cpu_isset(dest_cpu, p->cpus_allowed)
2582 || unlikely(cpu_is_offline(dest_cpu)))
2583 goto out;
2584
2585 /* force the process onto the specified CPU */
2586 if (migrate_task(p, dest_cpu, &req)) {
2587 /* Need to wait for migration thread (might exit: take ref). */
2588 struct task_struct *mt = rq->migration_thread;
2589
2590 get_task_struct(mt);
2591 task_rq_unlock(rq, &flags);
2592 wake_up_process(mt);
2593 put_task_struct(mt);
2594 wait_for_completion(&req.done);
2595
2596 return;
2597 }
2598 out:
2599 task_rq_unlock(rq, &flags);
2600 }
2601
2602 /*
2603 * sched_exec - execve() is a valuable balancing opportunity, because at
2604 * this point the task has the smallest effective memory and cache footprint.
2605 */
2606 void sched_exec(void)
2607 {
2608 int new_cpu, this_cpu = get_cpu();
2609 new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
2610 put_cpu();
2611 if (new_cpu != this_cpu)
2612 sched_migrate_task(current, new_cpu);
2613 }
2614
2615 /*
2616 * pull_task - move a task from a remote runqueue to the local runqueue.
2617 * Both runqueues must be locked.
2618 */
2619 static void pull_task(struct rq *src_rq, struct task_struct *p,
2620 struct rq *this_rq, int this_cpu)
2621 {
2622 deactivate_task(src_rq, p, 0);
2623 set_task_cpu(p, this_cpu);
2624 activate_task(this_rq, p, 0);
2625 /*
2626 	 * Note that idle threads have a prio of MAX_PRIO, so this test
2627 	 * is always true for them.
2628 */
2629 check_preempt_curr(this_rq, p);
2630 }
2631
2632 /*
2633 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2634 */
2635 static
2636 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2637 struct sched_domain *sd, enum cpu_idle_type idle,
2638 int *all_pinned)
2639 {
2640 /*
2641 * We do not migrate tasks that are:
2642 * 1) running (obviously), or
2643 	 * 2) not allowed onto this CPU due to cpus_allowed, or
2644 	 * 3) cache-hot on their current CPU.
2645 */
2646 if (!cpu_isset(this_cpu, p->cpus_allowed)) {
2647 schedstat_inc(p, se.nr_failed_migrations_affine);
2648 return 0;
2649 }
2650 *all_pinned = 0;
2651
2652 if (task_running(rq, p)) {
2653 schedstat_inc(p, se.nr_failed_migrations_running);
2654 return 0;
2655 }
2656
2657 /*
2658 * Aggressive migration if:
2659 * 1) task is cache cold, or
2660 * 2) too many balance attempts have failed.
2661 */
2662
2663 if (!task_hot(p, rq->clock, sd) ||
2664 sd->nr_balance_failed > sd->cache_nice_tries) {
2665 #ifdef CONFIG_SCHEDSTATS
2666 if (task_hot(p, rq->clock, sd)) {
2667 schedstat_inc(sd, lb_hot_gained[idle]);
2668 schedstat_inc(p, se.nr_forced_migrations);
2669 }
2670 #endif
2671 return 1;
2672 }
2673
2674 if (task_hot(p, rq->clock, sd)) {
2675 schedstat_inc(p, se.nr_failed_migrations_hot);
2676 return 0;
2677 }
2678 return 1;
2679 }
2680
2681 static unsigned long
2682 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2683 unsigned long max_load_move, struct sched_domain *sd,
2684 enum cpu_idle_type idle, int *all_pinned,
2685 int *this_best_prio, struct rq_iterator *iterator)
2686 {
2687 int loops = 0, pulled = 0, pinned = 0, skip_for_load;
2688 struct task_struct *p;
2689 long rem_load_move = max_load_move;
2690
2691 if (max_load_move == 0)
2692 goto out;
2693
2694 pinned = 1;
2695
2696 /*
2697 * Start the load-balancing iterator:
2698 */
2699 p = iterator->start(iterator->arg);
2700 next:
2701 if (!p || loops++ > sysctl_sched_nr_migrate)
2702 goto out;
2703 /*
2704 	 * To help distribute high priority tasks across CPUs, we don't
2705 	 * skip a task if it will be the highest priority task (i.e. smallest
2706 	 * prio value) on its new queue, regardless of its load weight.
2707 */
2708 skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
2709 SCHED_LOAD_SCALE_FUZZ;
2710 if ((skip_for_load && p->prio >= *this_best_prio) ||
2711 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
2712 p = iterator->next(iterator->arg);
2713 goto next;
2714 }
2715
2716 pull_task(busiest, p, this_rq, this_cpu);
2717 pulled++;
2718 rem_load_move -= p->se.load.weight;
2719
2720 /*
2721 * We only want to steal up to the prescribed amount of weighted load.
2722 */
2723 if (rem_load_move > 0) {
2724 if (p->prio < *this_best_prio)
2725 *this_best_prio = p->prio;
2726 p = iterator->next(iterator->arg);
2727 goto next;
2728 }
2729 out:
2730 /*
2731 * Right now, this is one of only two places pull_task() is called,
2732 * so we can safely collect pull_task() stats here rather than
2733 * inside pull_task().
2734 */
2735 schedstat_add(sd, lb_gained[idle], pulled);
2736
2737 if (all_pinned)
2738 *all_pinned = pinned;
2739
2740 return max_load_move - rem_load_move;
2741 }
2742
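/*
 * Illustrative sketch (editorial addition): the start()/next() callback
 * iteration that balance_tasks() above (and iter_move_one_task() below)
 * perform through struct rq_iterator. A toy iterator over a fixed array
 * shows the shape of such callbacks; "toy_iter" and its fields are
 * hypothetical names, the real iterators come from the scheduling classes.
 */
#if 0	/* example only */
struct toy_iter {
	struct task_struct **tasks;
	int nr, pos;
};

static struct task_struct *toy_next(void *arg)
{
	struct toy_iter *it = arg;

	return it->pos < it->nr ? it->tasks[it->pos++] : NULL;
}

static struct task_struct *toy_start(void *arg)
{
	((struct toy_iter *)arg)->pos = 0;
	return toy_next(arg);
}
#endif
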
2743 /*
2744 * move_tasks tries to move up to max_load_move weighted load from busiest to
2745 * this_rq, as part of a balancing operation within domain "sd".
2746 * Returns 1 if successful and 0 otherwise.
2747 *
2748 * Called with both runqueues locked.
2749 */
2750 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2751 unsigned long max_load_move,
2752 struct sched_domain *sd, enum cpu_idle_type idle,
2753 int *all_pinned)
2754 {
2755 const struct sched_class *class = sched_class_highest;
2756 unsigned long total_load_moved = 0;
2757 int this_best_prio = this_rq->curr->prio;
2758
2759 do {
2760 total_load_moved +=
2761 class->load_balance(this_rq, this_cpu, busiest,
2762 max_load_move - total_load_moved,
2763 sd, idle, all_pinned, &this_best_prio);
2764 class = class->next;
2765 } while (class && max_load_move > total_load_moved);
2766
2767 return total_load_moved > 0;
2768 }
2769
2770 static int
2771 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2772 struct sched_domain *sd, enum cpu_idle_type idle,
2773 struct rq_iterator *iterator)
2774 {
2775 struct task_struct *p = iterator->start(iterator->arg);
2776 int pinned = 0;
2777
2778 while (p) {
2779 if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
2780 pull_task(busiest, p, this_rq, this_cpu);
2781 /*
2782 * Right now, this is only the second place pull_task()
2783 * is called, so we can safely collect pull_task()
2784 * stats here rather than inside pull_task().
2785 */
2786 schedstat_inc(sd, lb_gained[idle]);
2787
2788 return 1;
2789 }
2790 p = iterator->next(iterator->arg);
2791 }
2792
2793 return 0;
2794 }
2795
2796 /*
2797 * move_one_task tries to move exactly one task from busiest to this_rq, as
2798 * part of active balancing operations within "domain".
2799 * Returns 1 if successful and 0 otherwise.
2800 *
2801 * Called with both runqueues locked.
2802 */
2803 static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2804 struct sched_domain *sd, enum cpu_idle_type idle)
2805 {
2806 const struct sched_class *class;
2807
2808 for (class = sched_class_highest; class; class = class->next)
2809 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
2810 return 1;
2811
2812 return 0;
2813 }
2814
2815 /*
2816 * find_busiest_group finds and returns the busiest CPU group within the
2817 * domain. It calculates and returns the amount of weighted load which
2818 * should be moved to restore balance via the imbalance parameter.
2819 */
2820 static struct sched_group *
2821 find_busiest_group(struct sched_domain *sd, int this_cpu,
2822 unsigned long *imbalance, enum cpu_idle_type idle,
2823 int *sd_idle, const cpumask_t *cpus, int *balance)
2824 {
2825 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
2826 unsigned long max_load, avg_load, total_load, this_load, total_pwr;
2827 unsigned long max_pull;
2828 unsigned long busiest_load_per_task, busiest_nr_running;
2829 unsigned long this_load_per_task, this_nr_running;
2830 int load_idx, group_imb = 0;
2831 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2832 int power_savings_balance = 1;
2833 unsigned long leader_nr_running = 0, min_load_per_task = 0;
2834 unsigned long min_nr_running = ULONG_MAX;
2835 struct sched_group *group_min = NULL, *group_leader = NULL;
2836 #endif
2837
2838 max_load = this_load = total_load = total_pwr = 0;
2839 busiest_load_per_task = busiest_nr_running = 0;
2840 this_load_per_task = this_nr_running = 0;
2841 if (idle == CPU_NOT_IDLE)
2842 load_idx = sd->busy_idx;
2843 else if (idle == CPU_NEWLY_IDLE)
2844 load_idx = sd->newidle_idx;
2845 else
2846 load_idx = sd->idle_idx;
2847
2848 do {
2849 unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
2850 int local_group;
2851 int i;
2852 int __group_imb = 0;
2853 unsigned int balance_cpu = -1, first_idle_cpu = 0;
2854 unsigned long sum_nr_running, sum_weighted_load;
2855
2856 local_group = cpu_isset(this_cpu, group->cpumask);
2857
2858 if (local_group)
2859 balance_cpu = first_cpu(group->cpumask);
2860
2861 /* Tally up the load of all CPUs in the group */
2862 sum_weighted_load = sum_nr_running = avg_load = 0;
2863 max_cpu_load = 0;
2864 min_cpu_load = ~0UL;
2865
2866 for_each_cpu_mask(i, group->cpumask) {
2867 struct rq *rq;
2868
2869 if (!cpu_isset(i, *cpus))
2870 continue;
2871
2872 rq = cpu_rq(i);
2873
2874 if (*sd_idle && rq->nr_running)
2875 *sd_idle = 0;
2876
2877 /* Bias balancing toward cpus of our domain */
2878 if (local_group) {
2879 if (idle_cpu(i) && !first_idle_cpu) {
2880 first_idle_cpu = 1;
2881 balance_cpu = i;
2882 }
2883
2884 load = target_load(i, load_idx);
2885 } else {
2886 load = source_load(i, load_idx);
2887 if (load > max_cpu_load)
2888 max_cpu_load = load;
2889 if (min_cpu_load > load)
2890 min_cpu_load = load;
2891 }
2892
2893 avg_load += load;
2894 sum_nr_running += rq->nr_running;
2895 sum_weighted_load += weighted_cpuload(i);
2896 }
2897
2898 /*
2899 		 * The first idle cpu, or else the first cpu (busiest), in this
2900 		 * sched group is eligible for doing load balancing at this and
2901 		 * higher domains. In the newly idle case, we allow all the cpus
2902 		 * to do the newly idle load balance.
2903 */
2904 if (idle != CPU_NEWLY_IDLE && local_group &&
2905 balance_cpu != this_cpu && balance) {
2906 *balance = 0;
2907 goto ret;
2908 }
2909
2910 total_load += avg_load;
2911 total_pwr += group->__cpu_power;
2912
2913 /* Adjust by relative CPU power of the group */
2914 avg_load = sg_div_cpu_power(group,
2915 avg_load * SCHED_LOAD_SCALE);
2916
2917 if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
2918 __group_imb = 1;
2919
2920 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
2921
2922 if (local_group) {
2923 this_load = avg_load;
2924 this = group;
2925 this_nr_running = sum_nr_running;
2926 this_load_per_task = sum_weighted_load;
2927 } else if (avg_load > max_load &&
2928 (sum_nr_running > group_capacity || __group_imb)) {
2929 max_load = avg_load;
2930 busiest = group;
2931 busiest_nr_running = sum_nr_running;
2932 busiest_load_per_task = sum_weighted_load;
2933 group_imb = __group_imb;
2934 }
2935
2936 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2937 /*
2938 * Busy processors will not participate in power savings
2939 * balance.
2940 */
2941 if (idle == CPU_NOT_IDLE ||
2942 !(sd->flags & SD_POWERSAVINGS_BALANCE))
2943 goto group_next;
2944
2945 /*
2946 		 * If the local group is idle or completely loaded, there is
2947 		 * no need to do power savings balance at this domain.
2948 */
2949 if (local_group && (this_nr_running >= group_capacity ||
2950 !this_nr_running))
2951 power_savings_balance = 0;
2952
2953 /*
2954 * If a group is already running at full capacity or idle,
2955 * don't include that group in power savings calculations
2956 */
2957 if (!power_savings_balance || sum_nr_running >= group_capacity
2958 || !sum_nr_running)
2959 goto group_next;
2960
2961 /*
2962 * Calculate the group which has the least non-idle load.
2963 		 * This is the group from which we need to pick up load
2964 		 * to save power.
2965 */
2966 if ((sum_nr_running < min_nr_running) ||
2967 (sum_nr_running == min_nr_running &&
2968 first_cpu(group->cpumask) <
2969 first_cpu(group_min->cpumask))) {
2970 group_min = group;
2971 min_nr_running = sum_nr_running;
2972 min_load_per_task = sum_weighted_load /
2973 sum_nr_running;
2974 }
2975
2976 /*
2977 		 * Calculate the group which is nearly at its
2978 		 * capacity but still has some room to pick up load
2979 		 * from other groups and save more power.
2980 */
2981 if (sum_nr_running <= group_capacity - 1) {
2982 if (sum_nr_running > leader_nr_running ||
2983 (sum_nr_running == leader_nr_running &&
2984 first_cpu(group->cpumask) >
2985 first_cpu(group_leader->cpumask))) {
2986 group_leader = group;
2987 leader_nr_running = sum_nr_running;
2988 }
2989 }
2990 group_next:
2991 #endif
2992 group = group->next;
2993 } while (group != sd->groups);
2994
2995 if (!busiest || this_load >= max_load || busiest_nr_running == 0)
2996 goto out_balanced;
2997
2998 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
2999
3000 if (this_load >= avg_load ||
3001 100*max_load <= sd->imbalance_pct*this_load)
3002 goto out_balanced;
3003
3004 busiest_load_per_task /= busiest_nr_running;
3005 if (group_imb)
3006 busiest_load_per_task = min(busiest_load_per_task, avg_load);
3007
3008 /*
3009 * We're trying to get all the cpus to the average_load, so we don't
3010 * want to push ourselves above the average load, nor do we wish to
3011 * reduce the max loaded cpu below the average load, as either of these
3012 * actions would just result in more rebalancing later, and ping-pong
3013 * tasks around. Thus we look for the minimum possible imbalance.
3014 * Negative imbalances (*we* are more loaded than anyone else) will
3015 * be counted as no imbalance for these purposes -- we can't fix that
3016 * by pulling tasks to us. Be careful of negative numbers as they'll
3017 * appear as very large values with unsigned longs.
3018 */
3019 if (max_load <= busiest_load_per_task)
3020 goto out_balanced;
3021
3022 /*
3023 * In the presence of smp nice balancing, certain scenarios can have
3024 	 * max load less than avg load (as we skip groups at or below
3025 	 * their cpu_power while calculating max_load).
3026 */
3027 if (max_load < avg_load) {
3028 *imbalance = 0;
3029 goto small_imbalance;
3030 }
3031
3032 /* Don't want to pull so many tasks that a group would go idle */
3033 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
3034
3035 /* How much load to actually move to equalise the imbalance */
3036 *imbalance = min(max_pull * busiest->__cpu_power,
3037 (avg_load - this_load) * this->__cpu_power)
3038 / SCHED_LOAD_SCALE;
3039
3040 /*
3041 	 * If *imbalance is less than the average load per runnable task,
3042 	 * there is no guarantee that any tasks will be moved, so we
3043 	 * consider bumping its value to force at least one task to be
3044 	 * moved.
3045 */
3046 if (*imbalance < busiest_load_per_task) {
3047 unsigned long tmp, pwr_now, pwr_move;
3048 unsigned int imbn;
3049
3050 small_imbalance:
3051 pwr_move = pwr_now = 0;
3052 imbn = 2;
3053 if (this_nr_running) {
3054 this_load_per_task /= this_nr_running;
3055 if (busiest_load_per_task > this_load_per_task)
3056 imbn = 1;
3057 } else
3058 this_load_per_task = SCHED_LOAD_SCALE;
3059
3060 if (max_load - this_load + SCHED_LOAD_SCALE_FUZZ >=
3061 busiest_load_per_task * imbn) {
3062 *imbalance = busiest_load_per_task;
3063 return busiest;
3064 }
3065
3066 /*
3067 * OK, we don't have enough imbalance to justify moving tasks,
3068 * however we may be able to increase total CPU power used by
3069 * moving them.
3070 */
3071
3072 pwr_now += busiest->__cpu_power *
3073 min(busiest_load_per_task, max_load);
3074 pwr_now += this->__cpu_power *
3075 min(this_load_per_task, this_load);
3076 pwr_now /= SCHED_LOAD_SCALE;
3077
3078 /* Amount of load we'd subtract */
3079 tmp = sg_div_cpu_power(busiest,
3080 busiest_load_per_task * SCHED_LOAD_SCALE);
3081 if (max_load > tmp)
3082 pwr_move += busiest->__cpu_power *
3083 min(busiest_load_per_task, max_load - tmp);
3084
3085 /* Amount of load we'd add */
3086 if (max_load * busiest->__cpu_power <
3087 busiest_load_per_task * SCHED_LOAD_SCALE)
3088 tmp = sg_div_cpu_power(this,
3089 max_load * busiest->__cpu_power);
3090 else
3091 tmp = sg_div_cpu_power(this,
3092 busiest_load_per_task * SCHED_LOAD_SCALE);
3093 pwr_move += this->__cpu_power *
3094 min(this_load_per_task, this_load + tmp);
3095 pwr_move /= SCHED_LOAD_SCALE;
3096
3097 /* Move if we gain throughput */
3098 if (pwr_move > pwr_now)
3099 *imbalance = busiest_load_per_task;
3100 }
3101
3102 return busiest;
3103
3104 out_balanced:
3105 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3106 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3107 goto ret;
3108
3109 if (this == group_leader && group_leader != group_min) {
3110 *imbalance = min_load_per_task;
3111 return group_min;
3112 }
3113 #endif
3114 ret:
3115 *imbalance = 0;
3116 return NULL;
3117 }
3118
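/*
 * Editorial worked example (not in the original source) for the imbalance
 * computation above. Take two single-cpu groups, each with __cpu_power ==
 * SCHED_LOAD_SCALE (1024): the local cpu runs one nice-0 task
 * (this_load == 1024) and the busiest cpu runs three (max_load == 3072,
 * busiest_load_per_task == 1024). Then avg_load = 1024*4096/2048 = 2048,
 * max_pull = min(3072 - 2048, 3072 - 1024) = 1024, and
 * *imbalance = min(1024*1024, (2048 - 1024)*1024) / 1024 = 1024, i.e.
 * exactly one nice-0 task's weight; moving one task lands both cpus on
 * the 2048 average.
 */
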
3119 /*
3120 * find_busiest_queue - find the busiest runqueue among the cpus in group.
3121 */
3122 static struct rq *
3123 find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3124 unsigned long imbalance, const cpumask_t *cpus)
3125 {
3126 struct rq *busiest = NULL, *rq;
3127 unsigned long max_load = 0;
3128 int i;
3129
3130 for_each_cpu_mask(i, group->cpumask) {
3131 unsigned long wl;
3132
3133 if (!cpu_isset(i, *cpus))
3134 continue;
3135
3136 rq = cpu_rq(i);
3137 wl = weighted_cpuload(i);
3138
3139 if (rq->nr_running == 1 && wl > imbalance)
3140 continue;
3141
3142 if (wl > max_load) {
3143 max_load = wl;
3144 busiest = rq;
3145 }
3146 }
3147
3148 return busiest;
3149 }
3150
3151 /*
3152  * Max backoff if we encounter pinned tasks. Pretty arbitrary value;
3153  * the exact number does not matter so long as it is large enough.
3154 */
3155 #define MAX_PINNED_INTERVAL 512
3156
3157 /*
3158 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3159 * tasks if there is an imbalance.
3160 */
3161 static int load_balance(int this_cpu, struct rq *this_rq,
3162 struct sched_domain *sd, enum cpu_idle_type idle,
3163 int *balance, cpumask_t *cpus)
3164 {
3165 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
3166 struct sched_group *group;
3167 unsigned long imbalance;
3168 struct rq *busiest;
3169 unsigned long flags;
3170
3171 cpus_setall(*cpus);
3172
3173 /*
3174 	 * When power savings policy is enabled for the parent domain, an idle
3175 	 * sibling can pick up load irrespective of busy siblings. In this case,
3176 	 * let the state of the idle sibling percolate up as CPU_IDLE, instead of
3177 	 * portraying it as CPU_NOT_IDLE.
3178 */
3179 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
3180 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3181 sd_idle = 1;
3182
3183 schedstat_inc(sd, lb_count[idle]);
3184
3185 redo:
3186 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
3187 cpus, balance);
3188
3189 if (*balance == 0)
3190 goto out_balanced;
3191
3192 if (!group) {
3193 schedstat_inc(sd, lb_nobusyg[idle]);
3194 goto out_balanced;
3195 }
3196
3197 busiest = find_busiest_queue(group, idle, imbalance, cpus);
3198 if (!busiest) {
3199 schedstat_inc(sd, lb_nobusyq[idle]);
3200 goto out_balanced;
3201 }
3202
3203 BUG_ON(busiest == this_rq);
3204
3205 schedstat_add(sd, lb_imbalance[idle], imbalance);
3206
3207 ld_moved = 0;
3208 if (busiest->nr_running > 1) {
3209 /*
3210 * Attempt to move tasks. If find_busiest_group has found
3211 * an imbalance but busiest->nr_running <= 1, the group is
3212 * still unbalanced. ld_moved simply stays zero, so it is
3213 * correctly treated as an imbalance.
3214 */
3215 local_irq_save(flags);
3216 double_rq_lock(this_rq, busiest);
3217 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3218 imbalance, sd, idle, &all_pinned);
3219 double_rq_unlock(this_rq, busiest);
3220 local_irq_restore(flags);
3221
3222 /*
3223 * some other cpu did the load balance for us.
3224 */
3225 if (ld_moved && this_cpu != smp_processor_id())
3226 resched_cpu(this_cpu);
3227
3228 /* All tasks on this runqueue were pinned by CPU affinity */
3229 if (unlikely(all_pinned)) {
3230 cpu_clear(cpu_of(busiest), *cpus);
3231 if (!cpus_empty(*cpus))
3232 goto redo;
3233 goto out_balanced;
3234 }
3235 }
3236
3237 if (!ld_moved) {
3238 schedstat_inc(sd, lb_failed[idle]);
3239 sd->nr_balance_failed++;
3240
3241 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
3242
3243 spin_lock_irqsave(&busiest->lock, flags);
3244
3245 			/* don't kick the migration_thread if the curr
3246 			 * task on the busiest cpu can't be moved to this_cpu
3247 */
3248 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
3249 spin_unlock_irqrestore(&busiest->lock, flags);
3250 all_pinned = 1;
3251 goto out_one_pinned;
3252 }
3253
3254 if (!busiest->active_balance) {
3255 busiest->active_balance = 1;
3256 busiest->push_cpu = this_cpu;
3257 active_balance = 1;
3258 }
3259 spin_unlock_irqrestore(&busiest->lock, flags);
3260 if (active_balance)
3261 wake_up_process(busiest->migration_thread);
3262
3263 /*
3264 * We've kicked active balancing, reset the failure
3265 * counter.
3266 */
3267 sd->nr_balance_failed = sd->cache_nice_tries+1;
3268 }
3269 } else
3270 sd->nr_balance_failed = 0;
3271
3272 if (likely(!active_balance)) {
3273 /* We were unbalanced, so reset the balancing interval */
3274 sd->balance_interval = sd->min_interval;
3275 } else {
3276 /*
3277 * If we've begun active balancing, start to back off. This
3278 * case may not be covered by the all_pinned logic if there
3279 * is only 1 task on the busy runqueue (because we don't call
3280 * move_tasks).
3281 */
3282 if (sd->balance_interval < sd->max_interval)
3283 sd->balance_interval *= 2;
3284 }
3285
3286 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3287 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3288 return -1;
3289 return ld_moved;
3290
3291 out_balanced:
3292 schedstat_inc(sd, lb_balanced[idle]);
3293
3294 sd->nr_balance_failed = 0;
3295
3296 out_one_pinned:
3297 /* tune up the balancing interval */
3298 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3299 (sd->balance_interval < sd->max_interval))
3300 sd->balance_interval *= 2;
3301
3302 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3303 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3304 return -1;
3305 return 0;
3306 }
3307
3308 /*
3309 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3310 * tasks if there is an imbalance.
3311 *
3312 * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
3313 * this_rq is locked.
3314 */
3315 static int
3316 load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
3317 cpumask_t *cpus)
3318 {
3319 struct sched_group *group;
3320 struct rq *busiest = NULL;
3321 unsigned long imbalance;
3322 int ld_moved = 0;
3323 int sd_idle = 0;
3324 int all_pinned = 0;
3325
3326 cpus_setall(*cpus);
3327
3328 /*
3329 	 * When power savings policy is enabled for the parent domain, an idle
3330 	 * sibling can pick up load irrespective of busy siblings. In this case,
3331 	 * let the state of the idle sibling percolate up as IDLE, instead of
3332 	 * portraying it as CPU_NOT_IDLE.
3333 */
3334 if (sd->flags & SD_SHARE_CPUPOWER &&
3335 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3336 sd_idle = 1;
3337
3338 schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
3339 redo:
3340 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
3341 &sd_idle, cpus, NULL);
3342 if (!group) {
3343 schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
3344 goto out_balanced;
3345 }
3346
3347 busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
3348 if (!busiest) {
3349 schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
3350 goto out_balanced;
3351 }
3352
3353 BUG_ON(busiest == this_rq);
3354
3355 schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
3356
3357 ld_moved = 0;
3358 if (busiest->nr_running > 1) {
3359 /* Attempt to move tasks */
3360 double_lock_balance(this_rq, busiest);
3361 /* this_rq->clock is already updated */
3362 update_rq_clock(busiest);
3363 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3364 imbalance, sd, CPU_NEWLY_IDLE,
3365 &all_pinned);
3366 spin_unlock(&busiest->lock);
3367
3368 if (unlikely(all_pinned)) {
3369 cpu_clear(cpu_of(busiest), *cpus);
3370 if (!cpus_empty(*cpus))
3371 goto redo;
3372 }
3373 }
3374
3375 if (!ld_moved) {
3376 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
3377 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3378 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3379 return -1;
3380 } else
3381 sd->nr_balance_failed = 0;
3382
3383 return ld_moved;
3384
3385 out_balanced:
3386 schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
3387 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3388 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3389 return -1;
3390 sd->nr_balance_failed = 0;
3391
3392 return 0;
3393 }
3394
3395 /*
3396 * idle_balance is called by schedule() if this_cpu is about to become
3397 * idle. Attempts to pull tasks from other CPUs.
3398 */
3399 static void idle_balance(int this_cpu, struct rq *this_rq)
3400 {
3401 struct sched_domain *sd;
3402 int pulled_task = -1;
3403 unsigned long next_balance = jiffies + HZ;
3404 cpumask_t tmpmask;
3405
3406 for_each_domain(this_cpu, sd) {
3407 unsigned long interval;
3408
3409 if (!(sd->flags & SD_LOAD_BALANCE))
3410 continue;
3411
3412 if (sd->flags & SD_BALANCE_NEWIDLE)
3413 /* If we've pulled tasks over stop searching: */
3414 pulled_task = load_balance_newidle(this_cpu, this_rq,
3415 sd, &tmpmask);
3416
3417 interval = msecs_to_jiffies(sd->balance_interval);
3418 if (time_after(next_balance, sd->last_balance + interval))
3419 next_balance = sd->last_balance + interval;
3420 if (pulled_task)
3421 break;
3422 }
3423 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3424 /*
3425 * We are going idle. next_balance may be set based on
3426 * a busy processor. So reset next_balance.
3427 */
3428 this_rq->next_balance = next_balance;
3429 }
3430 }
3431
3432 /*
3433 * active_load_balance is run by migration threads. It pushes running tasks
3434 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
3435 * running on each physical CPU where possible, and avoids physical /
3436 * logical imbalances.
3437 *
3438 * Called with busiest_rq locked.
3439 */
3440 static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3441 {
3442 int target_cpu = busiest_rq->push_cpu;
3443 struct sched_domain *sd;
3444 struct rq *target_rq;
3445
3446 /* Is there any task to move? */
3447 if (busiest_rq->nr_running <= 1)
3448 return;
3449
3450 target_rq = cpu_rq(target_cpu);
3451
3452 /*
3453 	 * This condition is "impossible"; if it occurs
3454 * we need to fix it. Originally reported by
3455 * Bjorn Helgaas on a 128-cpu setup.
3456 */
3457 BUG_ON(busiest_rq == target_rq);
3458
3459 /* move a task from busiest_rq to target_rq */
3460 double_lock_balance(busiest_rq, target_rq);
3461 update_rq_clock(busiest_rq);
3462 update_rq_clock(target_rq);
3463
3464 /* Search for an sd spanning us and the target CPU. */
3465 for_each_domain(target_cpu, sd) {
3466 if ((sd->flags & SD_LOAD_BALANCE) &&
3467 cpu_isset(busiest_cpu, sd->span))
3468 break;
3469 }
3470
3471 if (likely(sd)) {
3472 schedstat_inc(sd, alb_count);
3473
3474 if (move_one_task(target_rq, target_cpu, busiest_rq,
3475 sd, CPU_IDLE))
3476 schedstat_inc(sd, alb_pushed);
3477 else
3478 schedstat_inc(sd, alb_failed);
3479 }
3480 spin_unlock(&target_rq->lock);
3481 }
3482
3483 #ifdef CONFIG_NO_HZ
3484 static struct {
3485 atomic_t load_balancer;
3486 cpumask_t cpu_mask;
3487 } nohz ____cacheline_aligned = {
3488 .load_balancer = ATOMIC_INIT(-1),
3489 .cpu_mask = CPU_MASK_NONE,
3490 };
3491
3492 /*
3493 * This routine will try to nominate the ilb (idle load balancing)
3494 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
3495 * load balancing on behalf of all those cpus. If all the cpus in the system
3496 * go into this tickless mode, then there will be no ilb owner (as there is
3497 * no need for one) and all the cpus will sleep till the next wakeup event
3498 * arrives...
3499 *
3500  * For the ilb owner, the tick is not stopped, and this tick will be used
3501  * for idle load balancing. The ilb owner will still be part of
3502  * nohz.cpu_mask.
3503 *
3504 * While stopping the tick, this cpu will become the ilb owner if there
3505  * is no other owner, and will remain the owner until that cpu becomes busy
3506  * or until all cpus in the system stop their ticks, at which point
3507  * there is no need for an ilb owner.
3508 *
3509  * When the ilb owner becomes busy, it nominates another owner during the
3510  * next busy scheduler_tick().
3511 */
3512 int select_nohz_load_balancer(int stop_tick)
3513 {
3514 int cpu = smp_processor_id();
3515
3516 if (stop_tick) {
3517 cpu_set(cpu, nohz.cpu_mask);
3518 cpu_rq(cpu)->in_nohz_recently = 1;
3519
3520 /*
3521 * If we are going offline and still the leader, give up!
3522 */
3523 if (cpu_is_offline(cpu) &&
3524 atomic_read(&nohz.load_balancer) == cpu) {
3525 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3526 BUG();
3527 return 0;
3528 }
3529
3530 /* time for ilb owner also to sleep */
3531 if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
3532 if (atomic_read(&nohz.load_balancer) == cpu)
3533 atomic_set(&nohz.load_balancer, -1);
3534 return 0;
3535 }
3536
3537 if (atomic_read(&nohz.load_balancer) == -1) {
3538 /* make me the ilb owner */
3539 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
3540 return 1;
3541 } else if (atomic_read(&nohz.load_balancer) == cpu)
3542 return 1;
3543 } else {
3544 if (!cpu_isset(cpu, nohz.cpu_mask))
3545 return 0;
3546
3547 cpu_clear(cpu, nohz.cpu_mask);
3548
3549 if (atomic_read(&nohz.load_balancer) == cpu)
3550 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3551 BUG();
3552 }
3553 return 0;
3554 }
3555 #endif
3556
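/*
 * Illustrative sketch (editorial addition): the cmpxchg-based election
 * used by select_nohz_load_balancer() above, reduced to user-space C.
 * Whoever swings the owner field from -1 to its own id wins; every other
 * contender sees the compare-and-swap fail and backs off.
 * __sync_val_compare_and_swap() stands in for atomic_cmpxchg() here.
 */
#if 0	/* example only, user-space */
static int owner = -1;			/* -1: nobody owns the role */

static int try_become_owner(int my_id)
{
	/* the previous value was -1 only for the single winner */
	return __sync_val_compare_and_swap(&owner, -1, my_id) == -1;
}

static void resign_ownership(int my_id)
{
	/* only the current owner can clear the field back to -1 */
	__sync_val_compare_and_swap(&owner, my_id, -1);
}
#endif
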
3557 static DEFINE_SPINLOCK(balancing);
3558
3559 /*
3560 * It checks each scheduling domain to see if it is due to be balanced,
3561 * and initiates a balancing operation if so.
3562 *
3563 * Balancing parameters are set up in arch_init_sched_domains.
3564 */
3565 static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3566 {
3567 int balance = 1;
3568 struct rq *rq = cpu_rq(cpu);
3569 unsigned long interval;
3570 struct sched_domain *sd;
3571 /* Earliest time when we have to do rebalance again */
3572 unsigned long next_balance = jiffies + 60*HZ;
3573 int update_next_balance = 0;
3574 cpumask_t tmp;
3575
3576 for_each_domain(cpu, sd) {
3577 if (!(sd->flags & SD_LOAD_BALANCE))
3578 continue;
3579
3580 interval = sd->balance_interval;
3581 if (idle != CPU_IDLE)
3582 interval *= sd->busy_factor;
3583
3584 /* scale ms to jiffies */
3585 interval = msecs_to_jiffies(interval);
3586 if (unlikely(!interval))
3587 interval = 1;
3588 if (interval > HZ*NR_CPUS/10)
3589 interval = HZ*NR_CPUS/10;
3590
3591
3592 if (sd->flags & SD_SERIALIZE) {
3593 if (!spin_trylock(&balancing))
3594 goto out;
3595 }
3596
3597 if (time_after_eq(jiffies, sd->last_balance + interval)) {
3598 if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
3599 /*
3600 * We've pulled tasks over so either we're no
3601 * longer idle, or one of our SMT siblings is
3602 * not idle.
3603 */
3604 idle = CPU_NOT_IDLE;
3605 }
3606 sd->last_balance = jiffies;
3607 }
3608 if (sd->flags & SD_SERIALIZE)
3609 spin_unlock(&balancing);
3610 out:
3611 if (time_after(next_balance, sd->last_balance + interval)) {
3612 next_balance = sd->last_balance + interval;
3613 update_next_balance = 1;
3614 }
3615
3616 /*
3617 * Stop the load balance at this level. There is another
3618 * CPU in our sched group which is doing load balancing more
3619 * actively.
3620 */
3621 if (!balance)
3622 break;
3623 }
3624
3625 /*
3626 * next_balance will be updated only when there is a need.
3627 	 * When the cpu is attached to the null domain, for example, it will not be
3628 * updated.
3629 */
3630 if (likely(update_next_balance))
3631 rq->next_balance = next_balance;
3632 }
3633
3634 /*
3635 * run_rebalance_domains is triggered when needed from the scheduler tick.
3636  * In the CONFIG_NO_HZ case, the idle load balance owner will do the
3637  * rebalancing for all the cpus for which scheduler ticks are stopped.
3638 */
3639 static void run_rebalance_domains(struct softirq_action *h)
3640 {
3641 int this_cpu = smp_processor_id();
3642 struct rq *this_rq = cpu_rq(this_cpu);
3643 enum cpu_idle_type idle = this_rq->idle_at_tick ?
3644 CPU_IDLE : CPU_NOT_IDLE;
3645
3646 rebalance_domains(this_cpu, idle);
3647
3648 #ifdef CONFIG_NO_HZ
3649 /*
3650 * If this cpu is the owner for idle load balancing, then do the
3651 * balancing on behalf of the other idle cpus whose ticks are
3652 * stopped.
3653 */
3654 if (this_rq->idle_at_tick &&
3655 atomic_read(&nohz.load_balancer) == this_cpu) {
3656 cpumask_t cpus = nohz.cpu_mask;
3657 struct rq *rq;
3658 int balance_cpu;
3659
3660 cpu_clear(this_cpu, cpus);
3661 for_each_cpu_mask(balance_cpu, cpus) {
3662 /*
3663 * If this cpu gets work to do, stop the load balancing
3664 			 * work being done for other cpus. The next load
3665 			 * balancing owner will pick it up.
3666 */
3667 if (need_resched())
3668 break;
3669
3670 rebalance_domains(balance_cpu, CPU_IDLE);
3671
3672 rq = cpu_rq(balance_cpu);
3673 if (time_after(this_rq->next_balance, rq->next_balance))
3674 this_rq->next_balance = rq->next_balance;
3675 }
3676 }
3677 #endif
3678 }
3679
3680 /*
3681 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
3682 *
3683 * In case of CONFIG_NO_HZ, this is the place where we nominate a new
3684 * idle load balancing owner or decide to stop the periodic load balancing,
3685 * if the whole system is idle.
3686 */
3687 static inline void trigger_load_balance(struct rq *rq, int cpu)
3688 {
3689 #ifdef CONFIG_NO_HZ
3690 /*
3691 * If we were in the nohz mode recently and busy at the current
3692 	 * scheduler tick, then check if we need to nominate a new idle
3693 * load balancer.
3694 */
3695 if (rq->in_nohz_recently && !rq->idle_at_tick) {
3696 rq->in_nohz_recently = 0;
3697
3698 if (atomic_read(&nohz.load_balancer) == cpu) {
3699 cpu_clear(cpu, nohz.cpu_mask);
3700 atomic_set(&nohz.load_balancer, -1);
3701 }
3702
3703 if (atomic_read(&nohz.load_balancer) == -1) {
3704 /*
3705 * simple selection for now: Nominate the
3706 * first cpu in the nohz list to be the next
3707 * ilb owner.
3708 *
3709 * TBD: Traverse the sched domains and nominate
3710 * the nearest cpu in the nohz.cpu_mask.
3711 */
3712 int ilb = first_cpu(nohz.cpu_mask);
3713
3714 if (ilb < nr_cpu_ids)
3715 resched_cpu(ilb);
3716 }
3717 }
3718
3719 /*
3720 * If this cpu is idle and doing idle load balancing for all the
3721 * cpus with ticks stopped, is it time for that to stop?
3722 */
3723 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
3724 cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
3725 resched_cpu(cpu);
3726 return;
3727 }
3728
3729 /*
3730 * If this cpu is idle and the idle load balancing is done by
3731 	 * someone else, then there is no need to raise the SCHED_SOFTIRQ
3732 */
3733 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
3734 cpu_isset(cpu, nohz.cpu_mask))
3735 return;
3736 #endif
3737 if (time_after_eq(jiffies, rq->next_balance))
3738 raise_softirq(SCHED_SOFTIRQ);
3739 }
3740
3741 #else /* CONFIG_SMP */
3742
3743 /*
3744 * on UP we do not need to balance between CPUs:
3745 */
3746 static inline void idle_balance(int cpu, struct rq *rq)
3747 {
3748 }
3749
3750 #endif
3751
3752 DEFINE_PER_CPU(struct kernel_stat, kstat);
3753
3754 EXPORT_PER_CPU_SYMBOL(kstat);
3755
3756 /*
3757 * Return p->sum_exec_runtime plus any more ns on the sched_clock
3758 * that have not yet been banked in case the task is currently running.
3759 */
3760 unsigned long long task_sched_runtime(struct task_struct *p)
3761 {
3762 unsigned long flags;
3763 u64 ns, delta_exec;
3764 struct rq *rq;
3765
3766 rq = task_rq_lock(p, &flags);
3767 ns = p->se.sum_exec_runtime;
3768 if (task_current(rq, p)) {
3769 update_rq_clock(rq);
3770 delta_exec = rq->clock - p->se.exec_start;
3771 if ((s64)delta_exec > 0)
3772 ns += delta_exec;
3773 }
3774 task_rq_unlock(rq, &flags);
3775
3776 return ns;
3777 }
3778
3779 /*
3780 * Account user cpu time to a process.
3781 * @p: the process that the cpu time gets accounted to
3782 * @cputime: the cpu time spent in user space since the last update
3783 */
3784 void account_user_time(struct task_struct *p, cputime_t cputime)
3785 {
3786 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3787 cputime64_t tmp;
3788
3789 p->utime = cputime_add(p->utime, cputime);
3790
3791 /* Add user time to cpustat. */
3792 tmp = cputime_to_cputime64(cputime);
3793 if (TASK_NICE(p) > 0)
3794 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3795 else
3796 cpustat->user = cputime64_add(cpustat->user, tmp);
3797 }
3798
3799 /*
3800 * Account guest cpu time to a process.
3801 * @p: the process that the cpu time gets accounted to
3802 * @cputime: the cpu time spent in virtual machine since the last update
3803 */
3804 static void account_guest_time(struct task_struct *p, cputime_t cputime)
3805 {
3806 cputime64_t tmp;
3807 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3808
3809 tmp = cputime_to_cputime64(cputime);
3810
3811 p->utime = cputime_add(p->utime, cputime);
3812 p->gtime = cputime_add(p->gtime, cputime);
3813
3814 cpustat->user = cputime64_add(cpustat->user, tmp);
3815 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3816 }
3817
3818 /*
3819 * Account scaled user cpu time to a process.
3820 * @p: the process that the cpu time gets accounted to
3821 * @cputime: the cpu time spent in user space since the last update
3822 */
3823 void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
3824 {
3825 p->utimescaled = cputime_add(p->utimescaled, cputime);
3826 }
3827
3828 /*
3829 * Account system cpu time to a process.
3830 * @p: the process that the cpu time gets accounted to
3831 * @hardirq_offset: the offset to subtract from hardirq_count()
3832 * @cputime: the cpu time spent in kernel space since the last update
3833 */
3834 void account_system_time(struct task_struct *p, int hardirq_offset,
3835 cputime_t cputime)
3836 {
3837 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3838 struct rq *rq = this_rq();
3839 cputime64_t tmp;
3840
3841 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
3842 return account_guest_time(p, cputime);
3843
3844 p->stime = cputime_add(p->stime, cputime);
3845
3846 /* Add system time to cpustat. */
3847 tmp = cputime_to_cputime64(cputime);
3848 if (hardirq_count() - hardirq_offset)
3849 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3850 else if (softirq_count())
3851 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
3852 else if (p != rq->idle)
3853 cpustat->system = cputime64_add(cpustat->system, tmp);
3854 else if (atomic_read(&rq->nr_iowait) > 0)
3855 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
3856 else
3857 cpustat->idle = cputime64_add(cpustat->idle, tmp);
3858 /* Account for system time used */
3859 acct_update_integrals(p);
3860 }
3861
3862 /*
3863 * Account scaled system cpu time to a process.
3864 * @p: the process that the cpu time gets accounted to
3866 * @cputime: the cpu time spent in kernel space since the last update
3867 */
3868 void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
3869 {
3870 p->stimescaled = cputime_add(p->stimescaled, cputime);
3871 }
3872
3873 /*
3874 * Account for involuntary wait time.
3875 * @p: the process from which the cpu time has been stolen
3876 * @steal: the cpu time spent in involuntary wait
3877 */
3878 void account_steal_time(struct task_struct *p, cputime_t steal)
3879 {
3880 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3881 cputime64_t tmp = cputime_to_cputime64(steal);
3882 struct rq *rq = this_rq();
3883
3884 if (p == rq->idle) {
3885 p->stime = cputime_add(p->stime, steal);
3886 if (atomic_read(&rq->nr_iowait) > 0)
3887 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
3888 else
3889 cpustat->idle = cputime64_add(cpustat->idle, tmp);
3890 } else
3891 cpustat->steal = cputime64_add(cpustat->steal, tmp);
3892 }
3893
3894 /*
3895 * This function gets called by the timer code, with HZ frequency.
3896 * We call it with interrupts disabled.
3897 *
3898 * It also gets called by the fork code, when changing the parent's
3899 * timeslices.
3900 */
3901 void scheduler_tick(void)
3902 {
3903 int cpu = smp_processor_id();
3904 struct rq *rq = cpu_rq(cpu);
3905 struct task_struct *curr = rq->curr;
3906 u64 next_tick = rq->tick_timestamp + TICK_NSEC;
3907
3908 spin_lock(&rq->lock);
3909 __update_rq_clock(rq);
3910 /*
3911 * Let rq->clock advance by at least TICK_NSEC:
3912 */
3913 if (unlikely(rq->clock < next_tick)) {
3914 rq->clock = next_tick;
3915 rq->clock_underflows++;
3916 }
3917 rq->tick_timestamp = rq->clock;
3918 update_last_tick_seen(rq);
3919 update_cpu_load(rq);
3920 curr->sched_class->task_tick(rq, curr, 0);
3921 spin_unlock(&rq->lock);
3922
3923 #ifdef CONFIG_SMP
3924 rq->idle_at_tick = idle_cpu(cpu);
3925 trigger_load_balance(rq, cpu);
3926 #endif
3927 }
3928
3929 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
3930
3931 void __kprobes add_preempt_count(int val)
3932 {
3933 /*
3934 * Underflow?
3935 */
3936 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3937 return;
3938 preempt_count() += val;
3939 /*
3940 * Spinlock count overflowing soon?
3941 */
3942 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3943 PREEMPT_MASK - 10);
3944 }
3945 EXPORT_SYMBOL(add_preempt_count);
3946
3947 void __kprobes sub_preempt_count(int val)
3948 {
3949 /*
3950 * Underflow?
3951 */
3952 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3953 return;
3954 /*
3955 * Is the spinlock portion underflowing?
3956 */
3957 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3958 !(preempt_count() & PREEMPT_MASK)))
3959 return;
3960
3961 preempt_count() -= val;
3962 }
3963 EXPORT_SYMBOL(sub_preempt_count);
3964
3965 #endif
3966
3967 /*
3968 * Print scheduling while atomic bug:
3969 */
3970 static noinline void __schedule_bug(struct task_struct *prev)
3971 {
3972 struct pt_regs *regs = get_irq_regs();
3973
3974 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3975 prev->comm, prev->pid, preempt_count());
3976
3977 debug_show_held_locks(prev);
3978 if (irqs_disabled())
3979 print_irqtrace_events(prev);
3980
3981 if (regs)
3982 show_regs(regs);
3983 else
3984 dump_stack();
3985 }
3986
3987 /*
3988 * Various schedule()-time debugging checks and statistics:
3989 */
3990 static inline void schedule_debug(struct task_struct *prev)
3991 {
3992 /*
3993 * Test if we are atomic. Since do_exit() needs to call into
3994 * schedule() atomically, we ignore that path for now.
3995 * Otherwise, whine if we are scheduling when we should not be.
3996 */
3997 if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
3998 __schedule_bug(prev);
3999
4000 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4001
4002 schedstat_inc(this_rq(), sched_count);
4003 #ifdef CONFIG_SCHEDSTATS
4004 if (unlikely(prev->lock_depth >= 0)) {
4005 schedstat_inc(this_rq(), bkl_count);
4006 schedstat_inc(prev, sched_info.bkl_count);
4007 }
4008 #endif
4009 }
4010
4011 /*
4012 * Pick up the highest-prio task:
4013 */
4014 static inline struct task_struct *
4015 pick_next_task(struct rq *rq, struct task_struct *prev)
4016 {
4017 const struct sched_class *class;
4018 struct task_struct *p;
4019
4020 /*
4021 * Optimization: we know that if all tasks are in
4022 * the fair class we can call that function directly:
4023 */
4024 if (likely(rq->nr_running == rq->cfs.nr_running)) {
4025 p = fair_sched_class.pick_next_task(rq);
4026 if (likely(p))
4027 return p;
4028 }
4029
4030 class = sched_class_highest;
4031 for ( ; ; ) {
4032 p = class->pick_next_task(rq);
4033 if (p)
4034 return p;
4035 /*
4036 * Will never be NULL as the idle class always
4037 * returns a non-NULL p:
4038 */
4039 class = class->next;
4040 }
4041 }
4042
4043 /*
4044 * schedule() is the main scheduler function.
4045 */
4046 asmlinkage void __sched schedule(void)
4047 {
4048 struct task_struct *prev, *next;
4049 unsigned long *switch_count;
4050 struct rq *rq;
4051 int cpu;
4052
4053 need_resched:
4054 preempt_disable();
4055 cpu = smp_processor_id();
4056 rq = cpu_rq(cpu);
4057 rcu_qsctr_inc(cpu);
4058 prev = rq->curr;
4059 switch_count = &prev->nivcsw;
4060
4061 release_kernel_lock(prev);
4062 need_resched_nonpreemptible:
4063
4064 schedule_debug(prev);
4065
4066 hrtick_clear(rq);
4067
4068 /*
4069 * Do the rq-clock update outside the rq lock:
4070 */
4071 local_irq_disable();
4072 __update_rq_clock(rq);
4073 spin_lock(&rq->lock);
4074 clear_tsk_need_resched(prev);
4075
4076 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
4077 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
4078 signal_pending(prev))) {
4079 prev->state = TASK_RUNNING;
4080 } else {
4081 deactivate_task(rq, prev, 1);
4082 }
4083 switch_count = &prev->nvcsw;
4084 }
4085
4086 #ifdef CONFIG_SMP
4087 if (prev->sched_class->pre_schedule)
4088 prev->sched_class->pre_schedule(rq, prev);
4089 #endif
4090
4091 if (unlikely(!rq->nr_running))
4092 idle_balance(cpu, rq);
4093
4094 prev->sched_class->put_prev_task(rq, prev);
4095 next = pick_next_task(rq, prev);
4096
4097 sched_info_switch(prev, next);
4098
4099 if (likely(prev != next)) {
4100 rq->nr_switches++;
4101 rq->curr = next;
4102 ++*switch_count;
4103
4104 context_switch(rq, prev, next); /* unlocks the rq */
4105 /*
4106 * the context switch might have flipped the stack from under
4107 * us, hence refresh the local variables.
4108 */
4109 cpu = smp_processor_id();
4110 rq = cpu_rq(cpu);
4111 } else
4112 spin_unlock_irq(&rq->lock);
4113
4114 hrtick_set(rq);
4115
4116 if (unlikely(reacquire_kernel_lock(current) < 0))
4117 goto need_resched_nonpreemptible;
4118
4119 preempt_enable_no_resched();
4120 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
4121 goto need_resched;
4122 }
4123 EXPORT_SYMBOL(schedule);
4124
4125 #ifdef CONFIG_PREEMPT
4126 /*
4127 * this is the entry point to schedule() from in-kernel preemption
4128 * off of preempt_enable. Kernel preemption off of a return from interrupt
4129 * is handled separately, by the entry code via preempt_schedule_irq() below.
4130 */
4131 asmlinkage void __sched preempt_schedule(void)
4132 {
4133 struct thread_info *ti = current_thread_info();
4134 struct task_struct *task = current;
4135 int saved_lock_depth;
4136
4137 /*
4138 * If there is a non-zero preempt_count or interrupts are disabled,
4139 * we do not want to preempt the current task. Just return.
4140 */
4141 if (likely(ti->preempt_count || irqs_disabled()))
4142 return;
4143
4144 do {
4145 add_preempt_count(PREEMPT_ACTIVE);
4146
4147 /*
4148 * We keep the big kernel semaphore locked, but we
4149 * clear ->lock_depth so that schedule() doesn't
4150 * auto-release the semaphore:
4151 */
4152 saved_lock_depth = task->lock_depth;
4153 task->lock_depth = -1;
4154 schedule();
4155 task->lock_depth = saved_lock_depth;
4156 sub_preempt_count(PREEMPT_ACTIVE);
4157
4158 /*
4159 * Check again in case we missed a preemption opportunity
4160 * between schedule and now.
4161 */
4162 barrier();
4163 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
4164 }
4165 EXPORT_SYMBOL(preempt_schedule);
4166
4167 /*
4168 * this is the entry point to schedule() from kernel preemption
4169 * off of irq context.
4170 * Note that this is called and returns with irqs disabled. This
4171 * protects us against recursive calls from irq context.
4172 */
4173 asmlinkage void __sched preempt_schedule_irq(void)
4174 {
4175 struct thread_info *ti = current_thread_info();
4176 struct task_struct *task = current;
4177 int saved_lock_depth;
4178
4179 /* Catch callers which need to be fixed */
4180 BUG_ON(ti->preempt_count || !irqs_disabled());
4181
4182 do {
4183 add_preempt_count(PREEMPT_ACTIVE);
4184
4185 /*
4186 * We keep the big kernel semaphore locked, but we
4187 * clear ->lock_depth so that schedule() doesn't
4188 * auto-release the semaphore:
4189 */
4190 saved_lock_depth = task->lock_depth;
4191 task->lock_depth = -1;
4192 local_irq_enable();
4193 schedule();
4194 local_irq_disable();
4195 task->lock_depth = saved_lock_depth;
4196 sub_preempt_count(PREEMPT_ACTIVE);
4197
4198 /*
4199 * Check again in case we missed a preemption opportunity
4200 * between schedule and now.
4201 */
4202 barrier();
4203 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
4204 }
4205
4206 #endif /* CONFIG_PREEMPT */
4207
4208 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
4209 void *key)
4210 {
4211 return try_to_wake_up(curr->private, mode, sync);
4212 }
4213 EXPORT_SYMBOL(default_wake_function);
4214
4215 /*
4216 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4217 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
4218 * number) then we wake all the non-exclusive tasks and one exclusive task.
4219 *
4220 * There are circumstances in which we can try to wake a task which has already
4221 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
4222 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4223 */
4224 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4225 int nr_exclusive, int sync, void *key)
4226 {
4227 wait_queue_t *curr, *next;
4228
4229 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
4230 unsigned flags = curr->flags;
4231
4232 if (curr->func(curr, mode, sync, key) &&
4233 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
4234 break;
4235 }
4236 }
4237
4238 /**
4239 * __wake_up - wake up threads blocked on a waitqueue.
4240 * @q: the waitqueue
4241 * @mode: which threads
4242 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4243 * @key: is directly passed to the wakeup function
4244 */
4245 void __wake_up(wait_queue_head_t *q, unsigned int mode,
4246 int nr_exclusive, void *key)
4247 {
4248 unsigned long flags;
4249
4250 spin_lock_irqsave(&q->lock, flags);
4251 __wake_up_common(q, mode, nr_exclusive, 0, key);
4252 spin_unlock_irqrestore(&q->lock, flags);
4253 }
4254 EXPORT_SYMBOL(__wake_up);
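
/*
 * Example (illustrative sketch, not part of the original source): a minimal
 * producer/consumer pair built on the wakeup primitives above.  The names
 * demo_wq, demo_ready, demo_consumer and demo_producer are hypothetical;
 * wait_event_interruptible() adds the caller to the waitqueue and loops
 * until the condition becomes true.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;

static int demo_consumer(void *unused)
{
	/* sleep until demo_ready is set, or a signal arrives */
	if (wait_event_interruptible(demo_wq, demo_ready))
		return -ERESTARTSYS;
	return 0;
}

static void demo_producer(void)
{
	demo_ready = 1;
	wake_up(&demo_wq);	/* ends up in __wake_up() above */
}
#endif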
4255
4256 /*
4257 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4258 */
4259 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4260 {
4261 __wake_up_common(q, mode, 1, 0, NULL);
4262 }
4263
4264 /**
4265 * __wake_up_sync - wake up threads blocked on a waitqueue.
4266 * @q: the waitqueue
4267 * @mode: which threads
4268 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4269 *
4270 * The sync wakeup differs in that the waker knows that it will schedule
4271 * away soon, so while the target thread will be woken up, it will not
4272 * be migrated to another CPU - ie. the two threads are 'synchronized'
4273 * with each other. This can prevent needless bouncing between CPUs.
4274 *
4275 * On UP it can prevent extra preemption.
4276 */
4277 void
4278 __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4279 {
4280 unsigned long flags;
4281 int sync = 1;
4282
4283 if (unlikely(!q))
4284 return;
4285
4286 if (unlikely(!nr_exclusive))
4287 sync = 0;
4288
4289 spin_lock_irqsave(&q->lock, flags);
4290 __wake_up_common(q, mode, nr_exclusive, sync, NULL);
4291 spin_unlock_irqrestore(&q->lock, flags);
4292 }
4293 EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4294
4295 void complete(struct completion *x)
4296 {
4297 unsigned long flags;
4298
4299 spin_lock_irqsave(&x->wait.lock, flags);
4300 x->done++;
4301 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
4302 spin_unlock_irqrestore(&x->wait.lock, flags);
4303 }
4304 EXPORT_SYMBOL(complete);
4305
4306 void complete_all(struct completion *x)
4307 {
4308 unsigned long flags;
4309
4310 spin_lock_irqsave(&x->wait.lock, flags);
4311 x->done += UINT_MAX/2;
4312 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
4313 spin_unlock_irqrestore(&x->wait.lock, flags);
4314 }
4315 EXPORT_SYMBOL(complete_all);
4316
4317 static inline long __sched
4318 do_wait_for_common(struct completion *x, long timeout, int state)
4319 {
4320 if (!x->done) {
4321 DECLARE_WAITQUEUE(wait, current);
4322
4323 wait.flags |= WQ_FLAG_EXCLUSIVE;
4324 __add_wait_queue_tail(&x->wait, &wait);
4325 do {
4326 if ((state == TASK_INTERRUPTIBLE &&
4327 signal_pending(current)) ||
4328 (state == TASK_KILLABLE &&
4329 fatal_signal_pending(current))) {
4330 __remove_wait_queue(&x->wait, &wait);
4331 return -ERESTARTSYS;
4332 }
4333 __set_current_state(state);
4334 spin_unlock_irq(&x->wait.lock);
4335 timeout = schedule_timeout(timeout);
4336 spin_lock_irq(&x->wait.lock);
4337 if (!timeout) {
4338 __remove_wait_queue(&x->wait, &wait);
4339 return timeout;
4340 }
4341 } while (!x->done);
4342 __remove_wait_queue(&x->wait, &wait);
4343 }
4344 x->done--;
4345 return timeout;
4346 }
4347
4348 static long __sched
4349 wait_for_common(struct completion *x, long timeout, int state)
4350 {
4351 might_sleep();
4352
4353 spin_lock_irq(&x->wait.lock);
4354 timeout = do_wait_for_common(x, timeout, state);
4355 spin_unlock_irq(&x->wait.lock);
4356 return timeout;
4357 }
4358
4359 void __sched wait_for_completion(struct completion *x)
4360 {
4361 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
4362 }
4363 EXPORT_SYMBOL(wait_for_completion);
4364
4365 unsigned long __sched
4366 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4367 {
4368 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
4369 }
4370 EXPORT_SYMBOL(wait_for_completion_timeout);
4371
4372 int __sched wait_for_completion_interruptible(struct completion *x)
4373 {
4374 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4375 if (t == -ERESTARTSYS)
4376 return t;
4377 return 0;
4378 }
4379 EXPORT_SYMBOL(wait_for_completion_interruptible);
4380
4381 unsigned long __sched
4382 wait_for_completion_interruptible_timeout(struct completion *x,
4383 unsigned long timeout)
4384 {
4385 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
4386 }
4387 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4388
4389 int __sched wait_for_completion_killable(struct completion *x)
4390 {
4391 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4392 if (t == -ERESTARTSYS)
4393 return t;
4394 return 0;
4395 }
4396 EXPORT_SYMBOL(wait_for_completion_killable);
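
/*
 * Example (illustrative sketch, not part of the original source): the usual
 * completion pattern - one context announces an event with complete(),
 * another blocks on it with wait_for_completion().  demo_done and
 * demo_worker are hypothetical names.
 */
#if 0
static DECLARE_COMPLETION(demo_done);

static int demo_worker(void *unused)
{
	/* ... do the actual work ... */
	complete(&demo_done);		/* wakes exactly one waiter */
	return 0;
}

static void demo_wait_for_worker(void)
{
	/*
	 * Uninterruptible wait; the _interruptible/_killable/_timeout
	 * variants above are the signal- and timeout-aware versions.
	 */
	wait_for_completion(&demo_done);
}
#endif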
4397
4398 static long __sched
4399 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
4400 {
4401 unsigned long flags;
4402 wait_queue_t wait;
4403
4404 init_waitqueue_entry(&wait, current);
4405
4406 __set_current_state(state);
4407
4408 spin_lock_irqsave(&q->lock, flags);
4409 __add_wait_queue(q, &wait);
4410 spin_unlock(&q->lock);
4411 timeout = schedule_timeout(timeout);
4412 spin_lock_irq(&q->lock);
4413 __remove_wait_queue(q, &wait);
4414 spin_unlock_irqrestore(&q->lock, flags);
4415
4416 return timeout;
4417 }
4418
4419 void __sched interruptible_sleep_on(wait_queue_head_t *q)
4420 {
4421 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4422 }
4423 EXPORT_SYMBOL(interruptible_sleep_on);
4424
4425 long __sched
4426 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
4427 {
4428 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
4429 }
4430 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4431
4432 void __sched sleep_on(wait_queue_head_t *q)
4433 {
4434 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4435 }
4436 EXPORT_SYMBOL(sleep_on);
4437
4438 long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
4439 {
4440 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
4441 }
4442 EXPORT_SYMBOL(sleep_on_timeout);
4443
4444 #ifdef CONFIG_RT_MUTEXES
4445
4446 /*
4447 * rt_mutex_setprio - set the current priority of a task
4448 * @p: task
4449 * @prio: prio value (kernel-internal form)
4450 *
4451 * This function changes the 'effective' priority of a task. It does
4452 * not touch ->normal_prio like __setscheduler().
4453 *
4454 * Used by the rt_mutex code to implement priority inheritance logic.
4455 */
4456 void rt_mutex_setprio(struct task_struct *p, int prio)
4457 {
4458 unsigned long flags;
4459 int oldprio, on_rq, running;
4460 struct rq *rq;
4461 const struct sched_class *prev_class = p->sched_class;
4462
4463 BUG_ON(prio < 0 || prio > MAX_PRIO);
4464
4465 rq = task_rq_lock(p, &flags);
4466 update_rq_clock(rq);
4467
4468 oldprio = p->prio;
4469 on_rq = p->se.on_rq;
4470 running = task_current(rq, p);
4471 if (on_rq)
4472 dequeue_task(rq, p, 0);
4473 if (running)
4474 p->sched_class->put_prev_task(rq, p);
4475
4476 if (rt_prio(prio))
4477 p->sched_class = &rt_sched_class;
4478 else
4479 p->sched_class = &fair_sched_class;
4480
4481 p->prio = prio;
4482
4483 if (running)
4484 p->sched_class->set_curr_task(rq);
4485 if (on_rq) {
4486 enqueue_task(rq, p, 0);
4487
4488 check_class_changed(rq, p, prev_class, oldprio, running);
4489 }
4490 task_rq_unlock(rq, &flags);
4491 }
4492
4493 #endif
4494
4495 void set_user_nice(struct task_struct *p, long nice)
4496 {
4497 int old_prio, delta, on_rq;
4498 unsigned long flags;
4499 struct rq *rq;
4500
4501 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4502 return;
4503 /*
4504 * We have to be careful, if called from sys_setpriority(),
4505 * the task might be in the middle of scheduling on another CPU.
4506 */
4507 rq = task_rq_lock(p, &flags);
4508 update_rq_clock(rq);
4509 /*
4510 * The RT priorities are set via sched_setscheduler(), but we still
4511 * allow the 'normal' nice value to be set - but as expected
4512 * it wont have any effect on scheduling until the task is
4513 * SCHED_FIFO/SCHED_RR:
4514 */
4515 if (task_has_rt_policy(p)) {
4516 p->static_prio = NICE_TO_PRIO(nice);
4517 goto out_unlock;
4518 }
4519 on_rq = p->se.on_rq;
4520 if (on_rq) {
4521 dequeue_task(rq, p, 0);
4522 dec_load(rq, p);
4523 }
4524
4525 p->static_prio = NICE_TO_PRIO(nice);
4526 set_load_weight(p);
4527 old_prio = p->prio;
4528 p->prio = effective_prio(p);
4529 delta = p->prio - old_prio;
4530
4531 if (on_rq) {
4532 enqueue_task(rq, p, 0);
4533 inc_load(rq, p);
4534 /*
4535 * If the task increased its priority or is running and
4536 * lowered its priority, then reschedule its CPU:
4537 */
4538 if (delta < 0 || (delta > 0 && task_running(rq, p)))
4539 resched_task(rq->curr);
4540 }
4541 out_unlock:
4542 task_rq_unlock(rq, &flags);
4543 }
4544 EXPORT_SYMBOL(set_user_nice);
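
/*
 * Example (illustrative sketch, not part of the original source): a kernel
 * thread lowering its own priority with set_user_nice().  The thread
 * function and the nice value 10 are made up for illustration.
 */
#if 0
static int demo_background_thread(void *unused)
{
	set_user_nice(current, 10);	/* be nicer than the default 0 */

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}
#endif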
4545
4546 /*
4547 * can_nice - check if a task can reduce its nice value
4548 * @p: task
4549 * @nice: nice value
4550 */
4551 int can_nice(const struct task_struct *p, const int nice)
4552 {
4553 /* convert nice value [19,-20] to rlimit style value [1,40] */
4554 int nice_rlim = 20 - nice;
4555
4556 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
4557 capable(CAP_SYS_NICE));
4558 }
4559
4560 #ifdef __ARCH_WANT_SYS_NICE
4561
4562 /*
4563 * sys_nice - change the priority of the current process.
4564 * @increment: priority increment
4565 *
4566 * sys_setpriority is a more generic, but much slower function that
4567 * does similar things.
4568 */
4569 asmlinkage long sys_nice(int increment)
4570 {
4571 long nice, retval;
4572
4573 /*
4574 * Setpriority might change our priority at the same moment.
4575 * We don't have to worry. Conceptually one call occurs first
4576 * and we have a single winner.
4577 */
4578 if (increment < -40)
4579 increment = -40;
4580 if (increment > 40)
4581 increment = 40;
4582
4583 nice = PRIO_TO_NICE(current->static_prio) + increment;
4584 if (nice < -20)
4585 nice = -20;
4586 if (nice > 19)
4587 nice = 19;
4588
4589 if (increment < 0 && !can_nice(current, nice))
4590 return -EPERM;
4591
4592 retval = security_task_setnice(current, nice);
4593 if (retval)
4594 return retval;
4595
4596 set_user_nice(current, nice);
4597 return 0;
4598 }
4599
4600 #endif
4601
4602 /**
4603 * task_prio - return the priority value of a given task.
4604 * @p: the task in question.
4605 *
4606 * This is the priority value as seen by users in /proc.
4607 * RT tasks appear in the range -100..-1 (offset by -MAX_RT_PRIO);
4608 * normal tasks map their static priority to the range 0..39.
4609 */
4610 int task_prio(const struct task_struct *p)
4611 {
4612 return p->prio - MAX_RT_PRIO;
4613 }
4614
4615 /**
4616 * task_nice - return the nice value of a given task.
4617 * @p: the task in question.
4618 */
4619 int task_nice(const struct task_struct *p)
4620 {
4621 return TASK_NICE(p);
4622 }
4623 EXPORT_SYMBOL(task_nice);
4624
4625 /**
4626 * idle_cpu - is a given cpu idle currently?
4627 * @cpu: the processor in question.
4628 */
4629 int idle_cpu(int cpu)
4630 {
4631 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4632 }
4633
4634 /**
4635 * idle_task - return the idle task for a given cpu.
4636 * @cpu: the processor in question.
4637 */
4638 struct task_struct *idle_task(int cpu)
4639 {
4640 return cpu_rq(cpu)->idle;
4641 }
4642
4643 /**
4644 * find_process_by_pid - find a process with a matching PID value.
4645 * @pid: the pid in question.
4646 */
4647 static struct task_struct *find_process_by_pid(pid_t pid)
4648 {
4649 return pid ? find_task_by_vpid(pid) : current;
4650 }
4651
4652 /* Actually do priority change: must hold rq lock. */
4653 static void
4654 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4655 {
4656 BUG_ON(p->se.on_rq);
4657
4658 p->policy = policy;
4659 switch (p->policy) {
4660 case SCHED_NORMAL:
4661 case SCHED_BATCH:
4662 case SCHED_IDLE:
4663 p->sched_class = &fair_sched_class;
4664 break;
4665 case SCHED_FIFO:
4666 case SCHED_RR:
4667 p->sched_class = &rt_sched_class;
4668 break;
4669 }
4670
4671 p->rt_priority = prio;
4672 p->normal_prio = normal_prio(p);
4673 /* we are holding p->pi_lock already */
4674 p->prio = rt_mutex_getprio(p);
4675 set_load_weight(p);
4676 }
4677
4678 /**
4679 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4680 * @p: the task in question.
4681 * @policy: new policy.
4682 * @param: structure containing the new RT priority.
4683 *
4684 * NOTE that the task may already be dead.
4685 */
4686 int sched_setscheduler(struct task_struct *p, int policy,
4687 struct sched_param *param)
4688 {
4689 int retval, oldprio, oldpolicy = -1, on_rq, running;
4690 unsigned long flags;
4691 const struct sched_class *prev_class = p->sched_class;
4692 struct rq *rq;
4693
4694 /* may grab non-irq protected spin_locks */
4695 BUG_ON(in_interrupt());
4696 recheck:
4697 /* double check policy once rq lock held */
4698 if (policy < 0)
4699 policy = oldpolicy = p->policy;
4700 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
4701 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4702 policy != SCHED_IDLE)
4703 return -EINVAL;
4704 /*
4705 * Valid priorities for SCHED_FIFO and SCHED_RR are
4706 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4707 * SCHED_BATCH and SCHED_IDLE is 0.
4708 */
4709 if (param->sched_priority < 0 ||
4710 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
4711 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
4712 return -EINVAL;
4713 if (rt_policy(policy) != (param->sched_priority != 0))
4714 return -EINVAL;
4715
4716 /*
4717 * Allow unprivileged RT tasks to decrease priority:
4718 */
4719 if (!capable(CAP_SYS_NICE)) {
4720 if (rt_policy(policy)) {
4721 unsigned long rlim_rtprio;
4722
4723 if (!lock_task_sighand(p, &flags))
4724 return -ESRCH;
4725 rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
4726 unlock_task_sighand(p, &flags);
4727
4728 /* can't set/change the rt policy */
4729 if (policy != p->policy && !rlim_rtprio)
4730 return -EPERM;
4731
4732 /* can't increase priority */
4733 if (param->sched_priority > p->rt_priority &&
4734 param->sched_priority > rlim_rtprio)
4735 return -EPERM;
4736 }
4737 /*
4738 * Like positive nice levels, don't allow tasks to
4739 * move out of SCHED_IDLE either:
4740 */
4741 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
4742 return -EPERM;
4743
4744 /* can't change other user's priorities */
4745 if ((current->euid != p->euid) &&
4746 (current->euid != p->uid))
4747 return -EPERM;
4748 }
4749
4750 #ifdef CONFIG_RT_GROUP_SCHED
4751 /*
4752 * Do not allow realtime tasks into groups that have no runtime
4753 * assigned.
4754 */
4755 if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
4756 return -EPERM;
4757 #endif
4758
4759 retval = security_task_setscheduler(p, policy, param);
4760 if (retval)
4761 return retval;
4762 /*
4763 * make sure no PI-waiters arrive (or leave) while we are
4764 * changing the priority of the task:
4765 */
4766 spin_lock_irqsave(&p->pi_lock, flags);
4767 /*
4768 * To be able to change p->policy safely, the appropriate
4769 * runqueue lock must be held.
4770 */
4771 rq = __task_rq_lock(p);
4772 /* recheck policy now with rq lock held */
4773 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4774 policy = oldpolicy = -1;
4775 __task_rq_unlock(rq);
4776 spin_unlock_irqrestore(&p->pi_lock, flags);
4777 goto recheck;
4778 }
4779 update_rq_clock(rq);
4780 on_rq = p->se.on_rq;
4781 running = task_current(rq, p);
4782 if (on_rq)
4783 deactivate_task(rq, p, 0);
4784 if (running)
4785 p->sched_class->put_prev_task(rq, p);
4786
4787 oldprio = p->prio;
4788 __setscheduler(rq, p, policy, param->sched_priority);
4789
4790 if (running)
4791 p->sched_class->set_curr_task(rq);
4792 if (on_rq) {
4793 activate_task(rq, p, 0);
4794
4795 check_class_changed(rq, p, prev_class, oldprio, running);
4796 }
4797 __task_rq_unlock(rq);
4798 spin_unlock_irqrestore(&p->pi_lock, flags);
4799
4800 rt_mutex_adjust_pi(p);
4801
4802 return 0;
4803 }
4804 EXPORT_SYMBOL_GPL(sched_setscheduler);
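
/*
 * Example (illustrative sketch, not part of the original source): switching
 * a task to SCHED_FIFO from inside the kernel.  The priority 50 is an
 * arbitrary mid-range RT value and demo_make_rt is a hypothetical helper.
 */
#if 0
static void demo_make_rt(struct task_struct *p)
{
	struct sched_param param = { .sched_priority = 50 };

	if (sched_setscheduler(p, SCHED_FIFO, &param))
		printk(KERN_WARNING "demo: %s could not be made SCHED_FIFO\n",
			p->comm);
}
#endif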
4805
4806 static int
4807 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4808 {
4809 struct sched_param lparam;
4810 struct task_struct *p;
4811 int retval;
4812
4813 if (!param || pid < 0)
4814 return -EINVAL;
4815 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4816 return -EFAULT;
4817
4818 rcu_read_lock();
4819 retval = -ESRCH;
4820 p = find_process_by_pid(pid);
4821 if (p != NULL)
4822 retval = sched_setscheduler(p, policy, &lparam);
4823 rcu_read_unlock();
4824
4825 return retval;
4826 }
4827
4828 /**
4829 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4830 * @pid: the pid in question.
4831 * @policy: new policy.
4832 * @param: structure containing the new RT priority.
4833 */
4834 asmlinkage long
4835 sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4836 {
4837 /* negative values for policy are not valid */
4838 if (policy < 0)
4839 return -EINVAL;
4840
4841 return do_sched_setscheduler(pid, policy, param);
4842 }
4843
4844 /**
4845 * sys_sched_setparam - set/change the RT priority of a thread
4846 * @pid: the pid in question.
4847 * @param: structure containing the new RT priority.
4848 */
4849 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
4850 {
4851 return do_sched_setscheduler(pid, -1, param);
4852 }
4853
4854 /**
4855 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4856 * @pid: the pid in question.
4857 */
4858 asmlinkage long sys_sched_getscheduler(pid_t pid)
4859 {
4860 struct task_struct *p;
4861 int retval;
4862
4863 if (pid < 0)
4864 return -EINVAL;
4865
4866 retval = -ESRCH;
4867 read_lock(&tasklist_lock);
4868 p = find_process_by_pid(pid);
4869 if (p) {
4870 retval = security_task_getscheduler(p);
4871 if (!retval)
4872 retval = p->policy;
4873 }
4874 read_unlock(&tasklist_lock);
4875 return retval;
4876 }
4877
4878 /**
4879 * sys_sched_getparam - get the RT priority of a thread
4880 * @pid: the pid in question.
4881 * @param: structure containing the RT priority.
4882 */
4883 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
4884 {
4885 struct sched_param lp;
4886 struct task_struct *p;
4887 int retval;
4888
4889 if (!param || pid < 0)
4890 return -EINVAL;
4891
4892 read_lock(&tasklist_lock);
4893 p = find_process_by_pid(pid);
4894 retval = -ESRCH;
4895 if (!p)
4896 goto out_unlock;
4897
4898 retval = security_task_getscheduler(p);
4899 if (retval)
4900 goto out_unlock;
4901
4902 lp.sched_priority = p->rt_priority;
4903 read_unlock(&tasklist_lock);
4904
4905 /*
4906 * This one might sleep, we cannot do it with a spinlock held ...
4907 */
4908 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4909
4910 return retval;
4911
4912 out_unlock:
4913 read_unlock(&tasklist_lock);
4914 return retval;
4915 }
4916
4917 long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
4918 {
4919 cpumask_t cpus_allowed;
4920 cpumask_t new_mask = *in_mask;
4921 struct task_struct *p;
4922 int retval;
4923
4924 get_online_cpus();
4925 read_lock(&tasklist_lock);
4926
4927 p = find_process_by_pid(pid);
4928 if (!p) {
4929 read_unlock(&tasklist_lock);
4930 put_online_cpus();
4931 return -ESRCH;
4932 }
4933
4934 /*
4935 * It is not safe to call set_cpus_allowed with the
4936 * tasklist_lock held. We will bump the task_struct's
4937 * usage count and then drop tasklist_lock.
4938 */
4939 get_task_struct(p);
4940 read_unlock(&tasklist_lock);
4941
4942 retval = -EPERM;
4943 if ((current->euid != p->euid) && (current->euid != p->uid) &&
4944 !capable(CAP_SYS_NICE))
4945 goto out_unlock;
4946
4947 retval = security_task_setscheduler(p, 0, NULL);
4948 if (retval)
4949 goto out_unlock;
4950
4951 cpuset_cpus_allowed(p, &cpus_allowed);
4952 cpus_and(new_mask, new_mask, cpus_allowed);
4953 again:
4954 retval = set_cpus_allowed_ptr(p, &new_mask);
4955
4956 if (!retval) {
4957 cpuset_cpus_allowed(p, &cpus_allowed);
4958 if (!cpus_subset(new_mask, cpus_allowed)) {
4959 /*
4960 * We must have raced with a concurrent cpuset
4961 * update. Just reset the cpus_allowed to the
4962 * cpuset's cpus_allowed
4963 */
4964 new_mask = cpus_allowed;
4965 goto again;
4966 }
4967 }
4968 out_unlock:
4969 put_task_struct(p);
4970 put_online_cpus();
4971 return retval;
4972 }
4973
4974 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4975 cpumask_t *new_mask)
4976 {
4977 if (len < sizeof(cpumask_t)) {
4978 memset(new_mask, 0, sizeof(cpumask_t));
4979 } else if (len > sizeof(cpumask_t)) {
4980 len = sizeof(cpumask_t);
4981 }
4982 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4983 }
4984
4985 /**
4986 * sys_sched_setaffinity - set the cpu affinity of a process
4987 * @pid: pid of the process
4988 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4989 * @user_mask_ptr: user-space pointer to the new cpu mask
4990 */
4991 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
4992 unsigned long __user *user_mask_ptr)
4993 {
4994 cpumask_t new_mask;
4995 int retval;
4996
4997 retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
4998 if (retval)
4999 return retval;
5000
5001 return sched_setaffinity(pid, &new_mask);
5002 }
5003
5004 /*
5005 * Represents all cpus present in the system.
5006 * In systems capable of hotplug, this map could dynamically grow
5007 * as new cpus are detected in the system via any platform-specific
5008 * method, such as ACPI for example.
5009 */
5010
5011 cpumask_t cpu_present_map __read_mostly;
5012 EXPORT_SYMBOL(cpu_present_map);
5013
5014 #ifndef CONFIG_SMP
5015 cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
5016 EXPORT_SYMBOL(cpu_online_map);
5017
5018 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
5019 EXPORT_SYMBOL(cpu_possible_map);
5020 #endif
5021
5022 long sched_getaffinity(pid_t pid, cpumask_t *mask)
5023 {
5024 struct task_struct *p;
5025 int retval;
5026
5027 get_online_cpus();
5028 read_lock(&tasklist_lock);
5029
5030 retval = -ESRCH;
5031 p = find_process_by_pid(pid);
5032 if (!p)
5033 goto out_unlock;
5034
5035 retval = security_task_getscheduler(p);
5036 if (retval)
5037 goto out_unlock;
5038
5039 cpus_and(*mask, p->cpus_allowed, cpu_online_map);
5040
5041 out_unlock:
5042 read_unlock(&tasklist_lock);
5043 put_online_cpus();
5044
5045 return retval;
5046 }
5047
5048 /**
5049 * sys_sched_getaffinity - get the cpu affinity of a process
5050 * @pid: pid of the process
5051 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5052 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5053 */
5054 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
5055 unsigned long __user *user_mask_ptr)
5056 {
5057 int ret;
5058 cpumask_t mask;
5059
5060 if (len < sizeof(cpumask_t))
5061 return -EINVAL;
5062
5063 ret = sched_getaffinity(pid, &mask);
5064 if (ret < 0)
5065 return ret;
5066
5067 if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
5068 return -EFAULT;
5069
5070 return sizeof(cpumask_t);
5071 }
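
/*
 * Example (illustrative sketch, not part of the original source): user-space
 * use of the two affinity syscalls above through the glibc wrappers, pinning
 * the calling process to CPU 0 and reading the mask back.
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(set), &set))	/* pid 0 == self */
		perror("sched_setaffinity");

	if (sched_getaffinity(0, sizeof(set), &set) == 0)
		printf("pinned to cpu0: %d\n", CPU_ISSET(0, &set));
	return 0;
}
#endif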
5072
5073 /**
5074 * sys_sched_yield - yield the current processor to other threads.
5075 *
5076 * This function yields the current CPU to other tasks. If there are no
5077 * other threads running on this CPU then this function will return.
5078 */
5079 asmlinkage long sys_sched_yield(void)
5080 {
5081 struct rq *rq = this_rq_lock();
5082
5083 schedstat_inc(rq, yld_count);
5084 current->sched_class->yield_task(rq);
5085
5086 /*
5087 * Since we are going to call schedule() anyway, there's
5088 * no need to preempt or enable interrupts:
5089 */
5090 __release(rq->lock);
5091 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
5092 _raw_spin_unlock(&rq->lock);
5093 preempt_enable_no_resched();
5094
5095 schedule();
5096
5097 return 0;
5098 }
5099
5100 static void __cond_resched(void)
5101 {
5102 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
5103 __might_sleep(__FILE__, __LINE__);
5104 #endif
5105 /*
5106 * The BKS might be reacquired before we have dropped
5107 * PREEMPT_ACTIVE, which could trigger a second
5108 * cond_resched() call.
5109 */
5110 do {
5111 add_preempt_count(PREEMPT_ACTIVE);
5112 schedule();
5113 sub_preempt_count(PREEMPT_ACTIVE);
5114 } while (need_resched());
5115 }
5116
5117 #if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
5118 int __sched _cond_resched(void)
5119 {
5120 if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
5121 system_state == SYSTEM_RUNNING) {
5122 __cond_resched();
5123 return 1;
5124 }
5125 return 0;
5126 }
5127 EXPORT_SYMBOL(_cond_resched);
5128 #endif
5129
5130 /*
5131 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
5132 * call schedule, and on return reacquire the lock.
5133 *
5134 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
5135 * operations here to prevent schedule() from being called twice (once via
5136 * spin_unlock(), once by hand).
5137 */
5138 int cond_resched_lock(spinlock_t *lock)
5139 {
5140 int resched = need_resched() && system_state == SYSTEM_RUNNING;
5141 int ret = 0;
5142
5143 if (spin_needbreak(lock) || resched) {
5144 spin_unlock(lock);
5145 if (resched && need_resched())
5146 __cond_resched();
5147 else
5148 cpu_relax();
5149 ret = 1;
5150 spin_lock(lock);
5151 }
5152 return ret;
5153 }
5154 EXPORT_SYMBOL(cond_resched_lock);
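
/*
 * Example (illustrative sketch, not part of the original source): draining a
 * long list under a spinlock while staying preemption-friendly.  struct
 * demo_item and demo_drain are hypothetical; because cond_resched_lock() may
 * drop and retake the lock, the loop re-reads the list head each iteration.
 */
#if 0
struct demo_item {
	struct list_head list;
};

static void demo_drain(struct list_head *head, spinlock_t *lock)
{
	spin_lock(lock);
	while (!list_empty(head)) {
		struct demo_item *item =
			list_entry(head->next, struct demo_item, list);

		list_del(&item->list);
		/* ... process item ... */
		cond_resched_lock(lock);	/* may drop and retake 'lock' */
	}
	spin_unlock(lock);
}
#endif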
5155
5156 int __sched cond_resched_softirq(void)
5157 {
5158 BUG_ON(!in_softirq());
5159
5160 if (need_resched() && system_state == SYSTEM_RUNNING) {
5161 local_bh_enable();
5162 __cond_resched();
5163 local_bh_disable();
5164 return 1;
5165 }
5166 return 0;
5167 }
5168 EXPORT_SYMBOL(cond_resched_softirq);
5169
5170 /**
5171 * yield - yield the current processor to other threads.
5172 *
5173 * This is a shortcut for kernel-space yielding - it marks the
5174 * thread runnable and calls sys_sched_yield().
5175 */
5176 void __sched yield(void)
5177 {
5178 set_current_state(TASK_RUNNING);
5179 sys_sched_yield();
5180 }
5181 EXPORT_SYMBOL(yield);
5182
5183 /*
5184 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
5185 * that process accounting knows that this is a task in IO wait state.
5186 *
5187 * But don't do that if it is a deliberate, throttling IO wait (this task
5188 * has set its backing_dev_info: the queue against which it should throttle)
5189 */
5190 void __sched io_schedule(void)
5191 {
5192 struct rq *rq = &__raw_get_cpu_var(runqueues);
5193
5194 delayacct_blkio_start();
5195 atomic_inc(&rq->nr_iowait);
5196 schedule();
5197 atomic_dec(&rq->nr_iowait);
5198 delayacct_blkio_end();
5199 }
5200 EXPORT_SYMBOL(io_schedule);
5201
5202 long __sched io_schedule_timeout(long timeout)
5203 {
5204 struct rq *rq = &__raw_get_cpu_var(runqueues);
5205 long ret;
5206
5207 delayacct_blkio_start();
5208 atomic_inc(&rq->nr_iowait);
5209 ret = schedule_timeout(timeout);
5210 atomic_dec(&rq->nr_iowait);
5211 delayacct_blkio_end();
5212 return ret;
5213 }
5214
5215 /**
5216 * sys_sched_get_priority_max - return maximum RT priority.
5217 * @policy: scheduling class.
5218 *
5219 * this syscall returns the maximum rt_priority that can be used
5220 * by a given scheduling class.
5221 */
5222 asmlinkage long sys_sched_get_priority_max(int policy)
5223 {
5224 int ret = -EINVAL;
5225
5226 switch (policy) {
5227 case SCHED_FIFO:
5228 case SCHED_RR:
5229 ret = MAX_USER_RT_PRIO-1;
5230 break;
5231 case SCHED_NORMAL:
5232 case SCHED_BATCH:
5233 case SCHED_IDLE:
5234 ret = 0;
5235 break;
5236 }
5237 return ret;
5238 }
5239
5240 /**
5241 * sys_sched_get_priority_min - return minimum RT priority.
5242 * @policy: scheduling class.
5243 *
5244 * this syscall returns the minimum rt_priority that can be used
5245 * by a given scheduling class.
5246 */
5247 asmlinkage long sys_sched_get_priority_min(int policy)
5248 {
5249 int ret = -EINVAL;
5250
5251 switch (policy) {
5252 case SCHED_FIFO:
5253 case SCHED_RR:
5254 ret = 1;
5255 break;
5256 case SCHED_NORMAL:
5257 case SCHED_BATCH:
5258 case SCHED_IDLE:
5259 ret = 0;
5260 }
5261 return ret;
5262 }
5263
5264 /**
5265 * sys_sched_rr_get_interval - return the default timeslice of a process.
5266 * @pid: pid of the process.
5267 * @interval: userspace pointer to the timeslice value.
5268 *
5269 * this syscall writes the default timeslice value of a given process
5270 * into the user-space timespec buffer. A value of '0' means infinity.
5271 */
5272 asmlinkage
5273 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
5274 {
5275 struct task_struct *p;
5276 unsigned int time_slice;
5277 int retval;
5278 struct timespec t;
5279
5280 if (pid < 0)
5281 return -EINVAL;
5282
5283 retval = -ESRCH;
5284 read_lock(&tasklist_lock);
5285 p = find_process_by_pid(pid);
5286 if (!p)
5287 goto out_unlock;
5288
5289 retval = security_task_getscheduler(p);
5290 if (retval)
5291 goto out_unlock;
5292
5293 /*
5294 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
5295 * tasks that are on an otherwise idle runqueue:
5296 */
5297 time_slice = 0;
5298 if (p->policy == SCHED_RR) {
5299 time_slice = DEF_TIMESLICE;
5300 } else if (p->policy != SCHED_FIFO) {
5301 struct sched_entity *se = &p->se;
5302 unsigned long flags;
5303 struct rq *rq;
5304
5305 rq = task_rq_lock(p, &flags);
5306 if (rq->cfs.load.weight)
5307 time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
5308 task_rq_unlock(rq, &flags);
5309 }
5310 read_unlock(&tasklist_lock);
5311 jiffies_to_timespec(time_slice, &t);
5312 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5313 return retval;
5314
5315 out_unlock:
5316 read_unlock(&tasklist_lock);
5317 return retval;
5318 }
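
/*
 * Example (illustrative sketch, not part of the original source): querying
 * the round-robin timeslice of the current thread from user space via the
 * glibc wrapper for the syscall above.
 */
#if 0
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)	/* pid 0 == self */
		printf("timeslice: %ld.%09ld s\n",
			(long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif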
5319
5320 static const char stat_nam[] = "RSDTtZX";
5321
5322 void sched_show_task(struct task_struct *p)
5323 {
5324 unsigned long free = 0;
5325 unsigned state;
5326
5327 state = p->state ? __ffs(p->state) + 1 : 0;
5328 printk(KERN_INFO "%-13.13s %c", p->comm,
5329 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5330 #if BITS_PER_LONG == 32
5331 if (state == TASK_RUNNING)
5332 printk(KERN_CONT " running ");
5333 else
5334 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5335 #else
5336 if (state == TASK_RUNNING)
5337 printk(KERN_CONT " running task ");
5338 else
5339 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
5340 #endif
5341 #ifdef CONFIG_DEBUG_STACK_USAGE
5342 {
5343 unsigned long *n = end_of_stack(p);
5344 while (!*n)
5345 n++;
5346 free = (unsigned long)n - (unsigned long)end_of_stack(p);
5347 }
5348 #endif
5349 printk(KERN_CONT "%5lu %5d %6d\n", free,
5350 task_pid_nr(p), task_pid_nr(p->real_parent));
5351
5352 show_stack(p, NULL);
5353 }
5354
5355 void show_state_filter(unsigned long state_filter)
5356 {
5357 struct task_struct *g, *p;
5358
5359 #if BITS_PER_LONG == 32
5360 printk(KERN_INFO
5361 " task PC stack pid father\n");
5362 #else
5363 printk(KERN_INFO
5364 " task PC stack pid father\n");
5365 #endif
5366 read_lock(&tasklist_lock);
5367 do_each_thread(g, p) {
5368 /*
5369 * reset the NMI-timeout, listing all tasks on a slow
5370 * console might take a lot of time:
5371 */
5372 touch_nmi_watchdog();
5373 if (!state_filter || (p->state & state_filter))
5374 sched_show_task(p);
5375 } while_each_thread(g, p);
5376
5377 touch_all_softlockup_watchdogs();
5378
5379 #ifdef CONFIG_SCHED_DEBUG
5380 sysrq_sched_debug_show();
5381 #endif
5382 read_unlock(&tasklist_lock);
5383 /*
5384 * Only show locks if all tasks are dumped:
5385 */
5386 if (state_filter == -1)
5387 debug_show_all_locks();
5388 }
5389
5390 void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5391 {
5392 idle->sched_class = &idle_sched_class;
5393 }
5394
5395 /**
5396 * init_idle - set up an idle thread for a given CPU
5397 * @idle: task in question
5398 * @cpu: cpu the idle task belongs to
5399 *
5400 * NOTE: this function does not set the idle thread's NEED_RESCHED
5401 * flag, to make booting more robust.
5402 */
5403 void __cpuinit init_idle(struct task_struct *idle, int cpu)
5404 {
5405 struct rq *rq = cpu_rq(cpu);
5406 unsigned long flags;
5407
5408 __sched_fork(idle);
5409 idle->se.exec_start = sched_clock();
5410
5411 idle->prio = idle->normal_prio = MAX_PRIO;
5412 idle->cpus_allowed = cpumask_of_cpu(cpu);
5413 __set_task_cpu(idle, cpu);
5414
5415 spin_lock_irqsave(&rq->lock, flags);
5416 rq->curr = rq->idle = idle;
5417 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5418 idle->oncpu = 1;
5419 #endif
5420 spin_unlock_irqrestore(&rq->lock, flags);
5421
5422 /* Set the preempt count _outside_ the spinlocks! */
5423 task_thread_info(idle)->preempt_count = 0;
5424
5425 /*
5426 * The idle tasks have their own, simple scheduling class:
5427 */
5428 idle->sched_class = &idle_sched_class;
5429 }
5430
5431 /*
5432 * In a system that switches off the HZ timer nohz_cpu_mask
5433 * indicates which cpus entered this state. This is used
5434 * in the rcu update to wait only for active cpus. For systems
5435 * which do not switch off the HZ timer nohz_cpu_mask should
5436 * always be CPU_MASK_NONE.
5437 */
5438 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
5439
5440 /*
5441 * Increase the granularity value when there are more CPUs,
5442 * because with more CPUs the 'effective latency' as visible
5443 * to users decreases. But the relationship is not linear,
5444 * so pick a second-best guess by going with the log2 of the
5445 * number of CPUs.
5446 *
5447 * This idea comes from the SD scheduler of Con Kolivas:
5448 */
5449 static inline void sched_init_granularity(void)
5450 {
5451 unsigned int factor = 1 + ilog2(num_online_cpus());
5452 const unsigned long limit = 200000000;
5453
5454 sysctl_sched_min_granularity *= factor;
5455 if (sysctl_sched_min_granularity > limit)
5456 sysctl_sched_min_granularity = limit;
5457
5458 sysctl_sched_latency *= factor;
5459 if (sysctl_sched_latency > limit)
5460 sysctl_sched_latency = limit;
5461
5462 sysctl_sched_wakeup_granularity *= factor;
5463 }
5464
5465 #ifdef CONFIG_SMP
5466 /*
5467 * This is how migration works:
5468 *
5469 * 1) we queue a struct migration_req structure in the source CPU's
5470 * runqueue and wake up that CPU's migration thread.
5471 * 2) we wait on the request's completion => thread blocks.
5472 * 3) migration thread wakes up (implicitly it forces the migrated
5473 * thread off the CPU)
5474 * 4) it gets the migration request and checks whether the migrated
5475 * task is still in the wrong runqueue.
5476 * 5) if it's in the wrong runqueue then the migration thread removes
5477 * it and puts it into the right queue.
5478 * 6) the migration thread signals the request's completion.
5479 * 7) we wake up and the migration is done.
5480 */
5481
5482 /*
5483 * Change a given task's CPU affinity. Migrate the thread to a
5484 * proper CPU and schedule it away if the CPU it's executing on
5485 * is removed from the allowed bitmask.
5486 *
5487 * NOTE: the caller must have a valid reference to the task, the
5488 * task must not exit() & deallocate itself prematurely. The
5489 * call is not atomic; no spinlocks may be held.
5490 */
5491 int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
5492 {
5493 struct migration_req req;
5494 unsigned long flags;
5495 struct rq *rq;
5496 int ret = 0;
5497
5498 rq = task_rq_lock(p, &flags);
5499 if (!cpus_intersects(*new_mask, cpu_online_map)) {
5500 ret = -EINVAL;
5501 goto out;
5502 }
5503
5504 if (p->sched_class->set_cpus_allowed)
5505 p->sched_class->set_cpus_allowed(p, new_mask);
5506 else {
5507 p->cpus_allowed = *new_mask;
5508 p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
5509 }
5510
5511 /* Can the task run on the task's current CPU? If so, we're done */
5512 if (cpu_isset(task_cpu(p), *new_mask))
5513 goto out;
5514
5515 if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
5516 /* Need help from migration thread: drop lock and wait. */
5517 task_rq_unlock(rq, &flags);
5518 wake_up_process(rq->migration_thread);
5519 wait_for_completion(&req.done);
5520 tlb_migrate_finish(p->mm);
5521 return 0;
5522 }
5523 out:
5524 task_rq_unlock(rq, &flags);
5525
5526 return ret;
5527 }
5528 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
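
/*
 * Example (illustrative sketch, not part of the original source): restricting
 * a task to a single CPU from inside the kernel.  This is the in-kernel
 * counterpart of sched_setaffinity() and goes through the migration thread
 * machinery above when the task currently runs on a disallowed CPU.
 * demo_bind_to_cpu is a hypothetical helper.
 */
#if 0
static void demo_bind_to_cpu(struct task_struct *p, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (set_cpus_allowed_ptr(p, &mask))
		printk(KERN_WARNING "demo: cpu%d not online?\n", cpu);
}
#endif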
5529
5530 /*
5531 * Move (not current) task off this cpu, onto dest cpu. We're doing
5532 * this because either it can't run here any more (set_cpus_allowed()
5533 * away from this CPU, or CPU going down), or because we're
5534 * attempting to rebalance this task on exec (sched_exec).
5535 *
5536 * So we race with normal scheduler movements, but that's OK, as long
5537 * as the task is no longer on this CPU.
5538 *
5539 * Returns non-zero if task was successfully migrated.
5540 */
5541 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5542 {
5543 struct rq *rq_dest, *rq_src;
5544 int ret = 0, on_rq;
5545
5546 if (unlikely(cpu_is_offline(dest_cpu)))
5547 return ret;
5548
5549 rq_src = cpu_rq(src_cpu);
5550 rq_dest = cpu_rq(dest_cpu);
5551
5552 double_rq_lock(rq_src, rq_dest);
5553 /* Already moved. */
5554 if (task_cpu(p) != src_cpu)
5555 goto out;
5556 /* Affinity changed (again). */
5557 if (!cpu_isset(dest_cpu, p->cpus_allowed))
5558 goto out;
5559
5560 on_rq = p->se.on_rq;
5561 if (on_rq)
5562 deactivate_task(rq_src, p, 0);
5563
5564 set_task_cpu(p, dest_cpu);
5565 if (on_rq) {
5566 activate_task(rq_dest, p, 0);
5567 check_preempt_curr(rq_dest, p);
5568 }
5569 ret = 1;
5570 out:
5571 double_rq_unlock(rq_src, rq_dest);
5572 return ret;
5573 }
5574
5575 /*
5576 * migration_thread - this is a highprio system thread that performs
5577 * thread migration by bumping thread off CPU then 'pushing' onto
5578 * another runqueue.
5579 */
5580 static int migration_thread(void *data)
5581 {
5582 int cpu = (long)data;
5583 struct rq *rq;
5584
5585 rq = cpu_rq(cpu);
5586 BUG_ON(rq->migration_thread != current);
5587
5588 set_current_state(TASK_INTERRUPTIBLE);
5589 while (!kthread_should_stop()) {
5590 struct migration_req *req;
5591 struct list_head *head;
5592
5593 spin_lock_irq(&rq->lock);
5594
5595 if (cpu_is_offline(cpu)) {
5596 spin_unlock_irq(&rq->lock);
5597 goto wait_to_die;
5598 }
5599
5600 if (rq->active_balance) {
5601 active_load_balance(rq, cpu);
5602 rq->active_balance = 0;
5603 }
5604
5605 head = &rq->migration_queue;
5606
5607 if (list_empty(head)) {
5608 spin_unlock_irq(&rq->lock);
5609 schedule();
5610 set_current_state(TASK_INTERRUPTIBLE);
5611 continue;
5612 }
5613 req = list_entry(head->next, struct migration_req, list);
5614 list_del_init(head->next);
5615
5616 spin_unlock(&rq->lock);
5617 __migrate_task(req->task, cpu, req->dest_cpu);
5618 local_irq_enable();
5619
5620 complete(&req->done);
5621 }
5622 __set_current_state(TASK_RUNNING);
5623 return 0;
5624
5625 wait_to_die:
5626 /* Wait for kthread_stop */
5627 set_current_state(TASK_INTERRUPTIBLE);
5628 while (!kthread_should_stop()) {
5629 schedule();
5630 set_current_state(TASK_INTERRUPTIBLE);
5631 }
5632 __set_current_state(TASK_RUNNING);
5633 return 0;
5634 }
5635
5636 #ifdef CONFIG_HOTPLUG_CPU
5637
5638 static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
5639 {
5640 int ret;
5641
5642 local_irq_disable();
5643 ret = __migrate_task(p, src_cpu, dest_cpu);
5644 local_irq_enable();
5645 return ret;
5646 }
5647
5648 /*
5649 * Figure out where task on dead CPU should go, use force if necessary.
5650 * NOTE: interrupts should be disabled by the caller
5651 */
5652 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5653 {
5654 unsigned long flags;
5655 cpumask_t mask;
5656 struct rq *rq;
5657 int dest_cpu;
5658
5659 do {
5660 /* On same node? */
5661 mask = node_to_cpumask(cpu_to_node(dead_cpu));
5662 cpus_and(mask, mask, p->cpus_allowed);
5663 dest_cpu = any_online_cpu(mask);
5664
5665 /* On any allowed CPU? */
5666 if (dest_cpu >= nr_cpu_ids)
5667 dest_cpu = any_online_cpu(p->cpus_allowed);
5668
5669 /* No more Mr. Nice Guy. */
5670 if (dest_cpu >= nr_cpu_ids) {
5671 cpumask_t cpus_allowed;
5672
5673 cpuset_cpus_allowed_locked(p, &cpus_allowed);
5674 /*
5675 * Try to stay on the same cpuset, where the
5676 * current cpuset may be a subset of all cpus.
5677 * The cpuset_cpus_allowed_locked() variant of
5678 * cpuset_cpus_allowed() will not block. It must be
5679 * called within calls to cpuset_lock/cpuset_unlock.
5680 */
5681 rq = task_rq_lock(p, &flags);
5682 p->cpus_allowed = cpus_allowed;
5683 dest_cpu = any_online_cpu(p->cpus_allowed);
5684 task_rq_unlock(rq, &flags);
5685
5686 /*
5687 * Don't tell them about moving exiting tasks or
5688 * kernel threads (both mm NULL), since they never
5689 * leave kernel.
5690 */
5691 if (p->mm && printk_ratelimit()) {
5692 printk(KERN_INFO "process %d (%s) no "
5693 "longer affine to cpu%d\n",
5694 task_pid_nr(p), p->comm, dead_cpu);
5695 }
5696 }
5697 } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
5698 }
5699
5700 /*
5701 * While a dead CPU has no uninterruptible tasks queued at this point,
5702 * it might still have a nonzero ->nr_uninterruptible counter, because
5703 * for performance reasons the counter is not strictly tracking tasks to
5704 * their home CPUs. So we just add the counter to another CPU's counter,
5705 * to keep the global sum constant after CPU-down:
5706 */
5707 static void migrate_nr_uninterruptible(struct rq *rq_src)
5708 {
5709 struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
5710 unsigned long flags;
5711
5712 local_irq_save(flags);
5713 double_rq_lock(rq_src, rq_dest);
5714 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5715 rq_src->nr_uninterruptible = 0;
5716 double_rq_unlock(rq_src, rq_dest);
5717 local_irq_restore(flags);
5718 }
5719
5720 /* Run through task list and migrate tasks from the dead cpu. */
5721 static void migrate_live_tasks(int src_cpu)
5722 {
5723 struct task_struct *p, *t;
5724
5725 read_lock(&tasklist_lock);
5726
5727 do_each_thread(t, p) {
5728 if (p == current)
5729 continue;
5730
5731 if (task_cpu(p) == src_cpu)
5732 move_task_off_dead_cpu(src_cpu, p);
5733 } while_each_thread(t, p);
5734
5735 read_unlock(&tasklist_lock);
5736 }
5737
5738 /*
5739 * Schedules idle task to be the next runnable task on current CPU.
5740 * It does so by boosting its priority to highest possible.
5741 * Used by CPU offline code.
5742 */
5743 void sched_idle_next(void)
5744 {
5745 int this_cpu = smp_processor_id();
5746 struct rq *rq = cpu_rq(this_cpu);
5747 struct task_struct *p = rq->idle;
5748 unsigned long flags;
5749
5750 /* cpu has to be offline */
5751 BUG_ON(cpu_online(this_cpu));
5752
5753 /*
5754 * Strictly not necessary since the rest of the CPUs are stopped by now
5755 * and interrupts disabled on the current cpu.
5756 */
5757 spin_lock_irqsave(&rq->lock, flags);
5758
5759 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5760
5761 update_rq_clock(rq);
5762 activate_task(rq, p, 0);
5763
5764 spin_unlock_irqrestore(&rq->lock, flags);
5765 }
5766
5767 /*
5768 * Ensures that the idle task is using init_mm right before its cpu goes
5769 * offline.
5770 */
5771 void idle_task_exit(void)
5772 {
5773 struct mm_struct *mm = current->active_mm;
5774
5775 BUG_ON(cpu_online(smp_processor_id()));
5776
5777 if (mm != &init_mm)
5778 switch_mm(mm, &init_mm, current);
5779 mmdrop(mm);
5780 }
5781
5782 /* called under rq->lock with disabled interrupts */
5783 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
5784 {
5785 struct rq *rq = cpu_rq(dead_cpu);
5786
5787 /* Must be exiting, otherwise would be on tasklist. */
5788 BUG_ON(!p->exit_state);
5789
5790 /* Cannot have done final schedule yet: would have vanished. */
5791 BUG_ON(p->state == TASK_DEAD);
5792
5793 get_task_struct(p);
5794
5795 /*
5796 * Drop lock around migration; if someone else moves it,
5797 * that's OK. No task can be added to this CPU, so iteration is
5798 * fine.
5799 */
5800 spin_unlock_irq(&rq->lock);
5801 move_task_off_dead_cpu(dead_cpu, p);
5802 spin_lock_irq(&rq->lock);
5803
5804 put_task_struct(p);
5805 }
5806
5807 /* release_task() removes task from tasklist, so we won't find dead tasks. */
5808 static void migrate_dead_tasks(unsigned int dead_cpu)
5809 {
5810 struct rq *rq = cpu_rq(dead_cpu);
5811 struct task_struct *next;
5812
5813 for ( ; ; ) {
5814 if (!rq->nr_running)
5815 break;
5816 update_rq_clock(rq);
5817 next = pick_next_task(rq, rq->curr);
5818 if (!next)
5819 break;
5820 migrate_dead(dead_cpu, next);
5821
5822 }
5823 }
5824 #endif /* CONFIG_HOTPLUG_CPU */
5825
5826 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5827
5828 static struct ctl_table sd_ctl_dir[] = {
5829 {
5830 .procname = "sched_domain",
5831 .mode = 0555,
5832 },
5833 {0, },
5834 };
5835
5836 static struct ctl_table sd_ctl_root[] = {
5837 {
5838 .ctl_name = CTL_KERN,
5839 .procname = "kernel",
5840 .mode = 0555,
5841 .child = sd_ctl_dir,
5842 },
5843 {0, },
5844 };
5845
5846 static struct ctl_table *sd_alloc_ctl_entry(int n)
5847 {
5848 struct ctl_table *entry =
5849 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
5850
5851 return entry;
5852 }
5853
5854 static void sd_free_ctl_entry(struct ctl_table **tablep)
5855 {
5856 struct ctl_table *entry;
5857
5858 /*
5859 * In the intermediate directories, both the child directory and
5860 * procname are dynamically allocated and could fail but the mode
5861 * will always be set. In the lowest directory the names are
5862 * static strings and all have proc handlers.
5863 */
5864 for (entry = *tablep; entry->mode; entry++) {
5865 if (entry->child)
5866 sd_free_ctl_entry(&entry->child);
5867 if (entry->proc_handler == NULL)
5868 kfree(entry->procname);
5869 }
5870
5871 kfree(*tablep);
5872 *tablep = NULL;
5873 }
5874
5875 static void
5876 set_table_entry(struct ctl_table *entry,
5877 const char *procname, void *data, int maxlen,
5878 mode_t mode, proc_handler *proc_handler)
5879 {
5880 entry->procname = procname;
5881 entry->data = data;
5882 entry->maxlen = maxlen;
5883 entry->mode = mode;
5884 entry->proc_handler = proc_handler;
5885 }
5886
5887 static struct ctl_table *
5888 sd_alloc_ctl_domain_table(struct sched_domain *sd)
5889 {
5890 struct ctl_table *table = sd_alloc_ctl_entry(12);
5891
5892 if (table == NULL)
5893 return NULL;
5894
5895 set_table_entry(&table[0], "min_interval", &sd->min_interval,
5896 sizeof(long), 0644, proc_doulongvec_minmax);
5897 set_table_entry(&table[1], "max_interval", &sd->max_interval,
5898 sizeof(long), 0644, proc_doulongvec_minmax);
5899 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
5900 sizeof(int), 0644, proc_dointvec_minmax);
5901 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
5902 sizeof(int), 0644, proc_dointvec_minmax);
5903 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
5904 sizeof(int), 0644, proc_dointvec_minmax);
5905 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
5906 sizeof(int), 0644, proc_dointvec_minmax);
5907 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
5908 sizeof(int), 0644, proc_dointvec_minmax);
5909 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
5910 sizeof(int), 0644, proc_dointvec_minmax);
5911 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
5912 sizeof(int), 0644, proc_dointvec_minmax);
5913 set_table_entry(&table[9], "cache_nice_tries",
5914 &sd->cache_nice_tries,
5915 sizeof(int), 0644, proc_dointvec_minmax);
5916 set_table_entry(&table[10], "flags", &sd->flags,
5917 sizeof(int), 0644, proc_dointvec_minmax);
5918 /* &table[11] is terminator */
5919
5920 return table;
5921 }
5922
5923 static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
5924 {
5925 struct ctl_table *entry, *table;
5926 struct sched_domain *sd;
5927 int domain_num = 0, i;
5928 char buf[32];
5929
5930 for_each_domain(cpu, sd)
5931 domain_num++;
5932 entry = table = sd_alloc_ctl_entry(domain_num + 1);
5933 if (table == NULL)
5934 return NULL;
5935
5936 i = 0;
5937 for_each_domain(cpu, sd) {
5938 snprintf(buf, 32, "domain%d", i);
5939 entry->procname = kstrdup(buf, GFP_KERNEL);
5940 entry->mode = 0555;
5941 entry->child = sd_alloc_ctl_domain_table(sd);
5942 entry++;
5943 i++;
5944 }
5945 return table;
5946 }
5947
5948 static struct ctl_table_header *sd_sysctl_header;
5949 static void register_sched_domain_sysctl(void)
5950 {
5951 int i, cpu_num = num_online_cpus();
5952 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5953 char buf[32];
5954
5955 WARN_ON(sd_ctl_dir[0].child);
5956 sd_ctl_dir[0].child = entry;
5957
5958 if (entry == NULL)
5959 return;
5960
5961 for_each_online_cpu(i) {
5962 snprintf(buf, 32, "cpu%d", i);
5963 entry->procname = kstrdup(buf, GFP_KERNEL);
5964 entry->mode = 0555;
5965 entry->child = sd_alloc_ctl_cpu_table(i);
5966 entry++;
5967 }
5968
5969 WARN_ON(sd_sysctl_header);
5970 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5971 }
5972
5973 /* may be called multiple times per register */
5974 static void unregister_sched_domain_sysctl(void)
5975 {
5976 if (sd_sysctl_header)
5977 unregister_sysctl_table(sd_sysctl_header);
5978 sd_sysctl_header = NULL;
5979 if (sd_ctl_dir[0].child)
5980 sd_free_ctl_entry(&sd_ctl_dir[0].child);
5981 }
5982 #else
5983 static void register_sched_domain_sysctl(void)
5984 {
5985 }
5986 static void unregister_sched_domain_sysctl(void)
5987 {
5988 }
5989 #endif
5990
5991 /*
5992 * migration_call - callback that gets triggered when a CPU is added.
5993 * Here we can start up the necessary migration thread for the new CPU.
5994 */
5995 static int __cpuinit
5996 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5997 {
5998 struct task_struct *p;
5999 int cpu = (long)hcpu;
6000 unsigned long flags;
6001 struct rq *rq;
6002
6003 switch (action) {
6004
6005 case CPU_UP_PREPARE:
6006 case CPU_UP_PREPARE_FROZEN:
6007 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
6008 if (IS_ERR(p))
6009 return NOTIFY_BAD;
6010 kthread_bind(p, cpu);
6011 /* Must be high prio: stop_machine expects to yield to it. */
6012 rq = task_rq_lock(p, &flags);
6013 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
6014 task_rq_unlock(rq, &flags);
6015 cpu_rq(cpu)->migration_thread = p;
6016 break;
6017
6018 case CPU_ONLINE:
6019 case CPU_ONLINE_FROZEN:
6020 /* Strictly unnecessary, as first user will wake it. */
6021 wake_up_process(cpu_rq(cpu)->migration_thread);
6022
6023 /* Update our root-domain */
6024 rq = cpu_rq(cpu);
6025 spin_lock_irqsave(&rq->lock, flags);
6026 if (rq->rd) {
6027 BUG_ON(!cpu_isset(cpu, rq->rd->span));
6028 cpu_set(cpu, rq->rd->online);
6029 }
6030 spin_unlock_irqrestore(&rq->lock, flags);
6031 break;
6032
6033 #ifdef CONFIG_HOTPLUG_CPU
6034 case CPU_UP_CANCELED:
6035 case CPU_UP_CANCELED_FROZEN:
6036 if (!cpu_rq(cpu)->migration_thread)
6037 break;
6038 /* Unbind it from offline cpu so it can run. Fall thru. */
6039 kthread_bind(cpu_rq(cpu)->migration_thread,
6040 any_online_cpu(cpu_online_map));
6041 kthread_stop(cpu_rq(cpu)->migration_thread);
6042 cpu_rq(cpu)->migration_thread = NULL;
6043 break;
6044
6045 case CPU_DEAD:
6046 case CPU_DEAD_FROZEN:
6047 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
6048 migrate_live_tasks(cpu);
6049 rq = cpu_rq(cpu);
6050 kthread_stop(rq->migration_thread);
6051 rq->migration_thread = NULL;
6052 /* Idle task back to normal (off runqueue, low prio) */
6053 spin_lock_irq(&rq->lock);
6054 update_rq_clock(rq);
6055 deactivate_task(rq, rq->idle, 0);
6056 rq->idle->static_prio = MAX_PRIO;
6057 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
6058 rq->idle->sched_class = &idle_sched_class;
6059 migrate_dead_tasks(cpu);
6060 spin_unlock_irq(&rq->lock);
6061 cpuset_unlock();
6062 migrate_nr_uninterruptible(rq);
6063 BUG_ON(rq->nr_running != 0);
6064
6065 /*
6066 * No need to migrate the tasks: it was best-effort if
6067 * they didn't take sched_hotcpu_mutex. Just wake up
6068 * the requestors.
6069 */
6070 spin_lock_irq(&rq->lock);
6071 while (!list_empty(&rq->migration_queue)) {
6072 struct migration_req *req;
6073
6074 req = list_entry(rq->migration_queue.next,
6075 struct migration_req, list);
6076 list_del_init(&req->list);
6077 complete(&req->done);
6078 }
6079 spin_unlock_irq(&rq->lock);
6080 break;
6081
6082 case CPU_DYING:
6083 case CPU_DYING_FROZEN:
6084 /* Update our root-domain */
6085 rq = cpu_rq(cpu);
6086 spin_lock_irqsave(&rq->lock, flags);
6087 if (rq->rd) {
6088 BUG_ON(!cpu_isset(cpu, rq->rd->span));
6089 cpu_clear(cpu, rq->rd->online);
6090 }
6091 spin_unlock_irqrestore(&rq->lock, flags);
6092 break;
6093 #endif
6094 }
6095 return NOTIFY_OK;
6096 }
6097
6098 /* Register at highest priority so that task migration (migrate_all_tasks)
6099 * happens before everything else.
6100 */
6101 static struct notifier_block __cpuinitdata migration_notifier = {
6102 .notifier_call = migration_call,
6103 .priority = 10
6104 };
6105
6106 void __init migration_init(void)
6107 {
6108 void *cpu = (void *)(long)smp_processor_id();
6109 int err;
6110
6111 /* Start one for the boot CPU: */
6112 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6113 BUG_ON(err == NOTIFY_BAD);
6114 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6115 register_cpu_notifier(&migration_notifier);
6116 }
6117 #endif
6118
6119 #ifdef CONFIG_SMP
6120
6121 #ifdef CONFIG_SCHED_DEBUG
6122
6123 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6124 cpumask_t *groupmask)
6125 {
6126 struct sched_group *group = sd->groups;
6127 char str[256];
6128
6129 cpulist_scnprintf(str, sizeof(str), sd->span);
6130 cpus_clear(*groupmask);
6131
6132 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6133
6134 if (!(sd->flags & SD_LOAD_BALANCE)) {
6135 printk("does not load-balance\n");
6136 if (sd->parent)
6137 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6138 " has parent");
6139 return -1;
6140 }
6141
6142 printk(KERN_CONT "span %s\n", str);
6143
6144 if (!cpu_isset(cpu, sd->span)) {
6145 printk(KERN_ERR "ERROR: domain->span does not contain "
6146 "CPU%d\n", cpu);
6147 }
6148 if (!cpu_isset(cpu, group->cpumask)) {
6149 printk(KERN_ERR "ERROR: domain->groups does not contain"
6150 " CPU%d\n", cpu);
6151 }
6152
6153 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6154 do {
6155 if (!group) {
6156 printk("\n");
6157 printk(KERN_ERR "ERROR: group is NULL\n");
6158 break;
6159 }
6160
6161 if (!group->__cpu_power) {
6162 printk(KERN_CONT "\n");
6163 printk(KERN_ERR "ERROR: domain->cpu_power not "
6164 "set\n");
6165 break;
6166 }
6167
6168 if (!cpus_weight(group->cpumask)) {
6169 printk(KERN_CONT "\n");
6170 printk(KERN_ERR "ERROR: empty group\n");
6171 break;
6172 }
6173
6174 if (cpus_intersects(*groupmask, group->cpumask)) {
6175 printk(KERN_CONT "\n");
6176 printk(KERN_ERR "ERROR: repeated CPUs\n");
6177 break;
6178 }
6179
6180 cpus_or(*groupmask, *groupmask, group->cpumask);
6181
6182 cpulist_scnprintf(str, sizeof(str), group->cpumask);
6183 printk(KERN_CONT " %s", str);
6184
6185 group = group->next;
6186 } while (group != sd->groups);
6187 printk(KERN_CONT "\n");
6188
6189 if (!cpus_equal(sd->span, *groupmask))
6190 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
6191
6192 if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
6193 printk(KERN_ERR "ERROR: parent span is not a superset "
6194 "of domain->span\n");
6195 return 0;
6196 }
6197
6198 static void sched_domain_debug(struct sched_domain *sd, int cpu)
6199 {
6200 cpumask_t *groupmask;
6201 int level = 0;
6202
6203 if (!sd) {
6204 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6205 return;
6206 }
6207
6208 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6209
6210 groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
6211 if (!groupmask) {
6212 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6213 return;
6214 }
6215
6216 for (;;) {
6217 if (sched_domain_debug_one(sd, cpu, level, groupmask))
6218 break;
6219 level++;
6220 sd = sd->parent;
6221 if (!sd)
6222 break;
6223 }
6224 kfree(groupmask);
6225 }
6226 #else
6227 # define sched_domain_debug(sd, cpu) do { } while (0)
6228 #endif
6229
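/*
 * A sched domain is "degenerate" when it cannot influence scheduling:
 * it spans a single CPU, or none of its flags require more than one
 * group and none of the group-less wake flags are set. Degenerate
 * domains are collapsed away in cpu_attach_domain().
 */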
6230 static int sd_degenerate(struct sched_domain *sd)
6231 {
6232 if (cpus_weight(sd->span) == 1)
6233 return 1;
6234
6235 /* Following flags need at least 2 groups */
6236 if (sd->flags & (SD_LOAD_BALANCE |
6237 SD_BALANCE_NEWIDLE |
6238 SD_BALANCE_FORK |
6239 SD_BALANCE_EXEC |
6240 SD_SHARE_CPUPOWER |
6241 SD_SHARE_PKG_RESOURCES)) {
6242 if (sd->groups != sd->groups->next)
6243 return 0;
6244 }
6245
6246 /* Following flags don't use groups */
6247 if (sd->flags & (SD_WAKE_IDLE |
6248 SD_WAKE_AFFINE |
6249 SD_WAKE_BALANCE))
6250 return 0;
6251
6252 return 1;
6253 }
6254
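/*
 * A parent domain is redundant when it spans the same CPUs as the
 * child and adds no flags beyond the child's (flags that need multiple
 * groups are ignored if the parent has only one group).
 */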
6255 static int
6256 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
6257 {
6258 unsigned long cflags = sd->flags, pflags = parent->flags;
6259
6260 if (sd_degenerate(parent))
6261 return 1;
6262
6263 if (!cpus_equal(sd->span, parent->span))
6264 return 0;
6265
6266 /* Does parent contain flags not in child? */
6267 /* WAKE_BALANCE is a subset of WAKE_AFFINE */
6268 if (cflags & SD_WAKE_AFFINE)
6269 pflags &= ~SD_WAKE_BALANCE;
6270 /* Flags needing groups don't count if only 1 group in parent */
6271 if (parent->groups == parent->groups->next) {
6272 pflags &= ~(SD_LOAD_BALANCE |
6273 SD_BALANCE_NEWIDLE |
6274 SD_BALANCE_FORK |
6275 SD_BALANCE_EXEC |
6276 SD_SHARE_CPUPOWER |
6277 SD_SHARE_PKG_RESOURCES);
6278 }
6279 if (~cflags & pflags)
6280 return 0;
6281
6282 return 1;
6283 }
6284
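/*
 * Switch a runqueue over to a new root domain: let each sched class
 * leave the old root domain, drop the old reference, then join the new
 * one and mark the CPU in its span/online masks.
 */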
6285 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6286 {
6287 unsigned long flags;
6288 const struct sched_class *class;
6289
6290 spin_lock_irqsave(&rq->lock, flags);
6291
6292 if (rq->rd) {
6293 struct root_domain *old_rd = rq->rd;
6294
6295 for (class = sched_class_highest; class; class = class->next) {
6296 if (class->leave_domain)
6297 class->leave_domain(rq);
6298 }
6299
6300 cpu_clear(rq->cpu, old_rd->span);
6301 cpu_clear(rq->cpu, old_rd->online);
6302
6303 if (atomic_dec_and_test(&old_rd->refcount))
6304 kfree(old_rd);
6305 }
6306
6307 atomic_inc(&rd->refcount);
6308 rq->rd = rd;
6309
6310 cpu_set(rq->cpu, rd->span);
6311 if (cpu_isset(rq->cpu, cpu_online_map))
6312 cpu_set(rq->cpu, rd->online);
6313
6314 for (class = sched_class_highest; class; class = class->next) {
6315 if (class->join_domain)
6316 class->join_domain(rq);
6317 }
6318
6319 spin_unlock_irqrestore(&rq->lock, flags);
6320 }
6321
6322 static void init_rootdomain(struct root_domain *rd)
6323 {
6324 memset(rd, 0, sizeof(*rd));
6325
6326 cpus_clear(rd->span);
6327 cpus_clear(rd->online);
6328 }
6329
6330 static void init_defrootdomain(void)
6331 {
6332 init_rootdomain(&def_root_domain);
6333 atomic_set(&def_root_domain.refcount, 1);
6334 }
6335
6336 static struct root_domain *alloc_rootdomain(void)
6337 {
6338 struct root_domain *rd;
6339
6340 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6341 if (!rd)
6342 return NULL;
6343
6344 init_rootdomain(rd);
6345
6346 return rd;
6347 }
6348
6349 /*
6350 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
6351 * hold the hotplug lock.
6352 */
6353 static void
6354 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
6355 {
6356 struct rq *rq = cpu_rq(cpu);
6357 struct sched_domain *tmp;
6358
6359 /* Remove the sched domains which do not contribute to scheduling. */
6360 for (tmp = sd; tmp; tmp = tmp->parent) {
6361 struct sched_domain *parent = tmp->parent;
6362 if (!parent)
6363 break;
6364 if (sd_parent_degenerate(tmp, parent)) {
6365 tmp->parent = parent->parent;
6366 if (parent->parent)
6367 parent->parent->child = tmp;
6368 }
6369 }
6370
6371 if (sd && sd_degenerate(sd)) {
6372 sd = sd->parent;
6373 if (sd)
6374 sd->child = NULL;
6375 }
6376
6377 sched_domain_debug(sd, cpu);
6378
6379 rq_attach_root(rq, rd);
6380 rcu_assign_pointer(rq->sd, sd);
6381 }
6382
6383 /* cpus with isolated domains */
6384 static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
6385
6386 /* Setup the mask of cpus configured for isolated domains */
6387 static int __init isolated_cpu_setup(char *str)
6388 {
6389 int ints[NR_CPUS], i;
6390
6391 str = get_options(str, ARRAY_SIZE(ints), ints);
6392 cpus_clear(cpu_isolated_map);
6393 for (i = 1; i <= ints[0]; i++)
6394 if (ints[i] < NR_CPUS)
6395 cpu_set(ints[i], cpu_isolated_map);
6396 return 1;
6397 }
6398
6399 __setup("isolcpus=", isolated_cpu_setup);
6400
6401 /*
6402 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6403 * to a function which identifies what group (along with its sched group) a
6404 * CPU belongs to. The return value of group_fn must be >= 0 and < NR_CPUS
6405 * (because we keep track of the groups covered with a cpumask_t).
6406 *
6407 * init_sched_build_groups will build a circular linked list of the groups
6408 * covered by the given span, and will set each group's ->cpumask correctly,
6409 * and ->cpu_power to 0.
6410 */
6411 static void
6412 init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6413 int (*group_fn)(int cpu, const cpumask_t *cpu_map,
6414 struct sched_group **sg,
6415 cpumask_t *tmpmask),
6416 cpumask_t *covered, cpumask_t *tmpmask)
6417 {
6418 struct sched_group *first = NULL, *last = NULL;
6419 int i;
6420
6421 cpus_clear(*covered);
6422
6423 for_each_cpu_mask(i, *span) {
6424 struct sched_group *sg;
6425 int group = group_fn(i, cpu_map, &sg, tmpmask);
6426 int j;
6427
6428 if (cpu_isset(i, *covered))
6429 continue;
6430
6431 cpus_clear(sg->cpumask);
6432 sg->__cpu_power = 0;
6433
6434 for_each_cpu_mask(j, *span) {
6435 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
6436 continue;
6437
6438 cpu_set(j, *covered);
6439 cpu_set(j, sg->cpumask);
6440 }
6441 if (!first)
6442 first = sg;
6443 if (last)
6444 last->next = sg;
6445 last = sg;
6446 }
6447 last->next = first;
6448 }
6449
6450 #define SD_NODES_PER_DOMAIN 16
6451
6452 #ifdef CONFIG_NUMA
6453
6454 /**
6455 * find_next_best_node - find the next node to include in a sched_domain
6456 * @node: node whose sched_domain we're building
6457 * @used_nodes: nodes already in the sched_domain
6458 *
6459 * Find the next node to include in a given scheduling domain. Simply
6460 * finds the closest node not already in the @used_nodes map.
6461 *
6462 * Should use nodemask_t.
6463 */
6464 static int find_next_best_node(int node, nodemask_t *used_nodes)
6465 {
6466 int i, n, val, min_val, best_node = 0;
6467
6468 min_val = INT_MAX;
6469
6470 for (i = 0; i < MAX_NUMNODES; i++) {
6471 /* Start at @node */
6472 n = (node + i) % MAX_NUMNODES;
6473
6474 if (!nr_cpus_node(n))
6475 continue;
6476
6477 /* Skip already used nodes */
6478 if (node_isset(n, *used_nodes))
6479 continue;
6480
6481 /* Simple min distance search */
6482 val = node_distance(node, n);
6483
6484 if (val < min_val) {
6485 min_val = val;
6486 best_node = n;
6487 }
6488 }
6489
6490 node_set(best_node, *used_nodes);
6491 return best_node;
6492 }
6493
6494 /**
6495 * sched_domain_node_span - get a cpumask for a node's sched_domain
6496 * @node: node whose cpumask we're constructing
6497 *
6498 * Given a node, construct a good cpumask for its sched_domain to span. It
6499 * should be one that prevents unnecessary balancing, but also spreads tasks
6500 * out optimally.
6501 */
6502 static void sched_domain_node_span(int node, cpumask_t *span)
6503 {
6504 nodemask_t used_nodes;
6505 node_to_cpumask_ptr(nodemask, node);
6506 int i;
6507
6508 cpus_clear(*span);
6509 nodes_clear(used_nodes);
6510
6511 cpus_or(*span, *span, *nodemask);
6512 node_set(node, used_nodes);
6513
6514 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
6515 int next_node = find_next_best_node(node, &used_nodes);
6516
6517 node_to_cpumask_ptr_next(nodemask, next_node);
6518 cpus_or(*span, *span, *nodemask);
6519 }
6520 }
6521 #endif
6522
6523 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
6524
6525 /*
6526 * SMT sched-domains:
6527 */
6528 #ifdef CONFIG_SCHED_SMT
6529 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
6530 static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
6531
6532 static int
6533 cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
6534 cpumask_t *unused)
6535 {
6536 if (sg)
6537 *sg = &per_cpu(sched_group_cpus, cpu);
6538 return cpu;
6539 }
6540 #endif
6541
6542 /*
6543 * multi-core sched-domains:
6544 */
6545 #ifdef CONFIG_SCHED_MC
6546 static DEFINE_PER_CPU(struct sched_domain, core_domains);
6547 static DEFINE_PER_CPU(struct sched_group, sched_group_core);
6548 #endif
6549
6550 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
6551 static int
6552 cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
6553 cpumask_t *mask)
6554 {
6555 int group;
6556
6557 *mask = per_cpu(cpu_sibling_map, cpu);
6558 cpus_and(*mask, *mask, *cpu_map);
6559 group = first_cpu(*mask);
6560 if (sg)
6561 *sg = &per_cpu(sched_group_core, group);
6562 return group;
6563 }
6564 #elif defined(CONFIG_SCHED_MC)
6565 static int
6566 cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
6567 cpumask_t *unused)
6568 {
6569 if (sg)
6570 *sg = &per_cpu(sched_group_core, cpu);
6571 return cpu;
6572 }
6573 #endif
6574
6575 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
6576 static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
6577
6578 static int
6579 cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
6580 cpumask_t *mask)
6581 {
6582 int group;
6583 #ifdef CONFIG_SCHED_MC
6584 *mask = cpu_coregroup_map(cpu);
6585 cpus_and(*mask, *mask, *cpu_map);
6586 group = first_cpu(*mask);
6587 #elif defined(CONFIG_SCHED_SMT)
6588 *mask = per_cpu(cpu_sibling_map, cpu);
6589 cpus_and(*mask, *mask, *cpu_map);
6590 group = first_cpu(*mask);
6591 #else
6592 group = cpu;
6593 #endif
6594 if (sg)
6595 *sg = &per_cpu(sched_group_phys, group);
6596 return group;
6597 }
6598
6599 #ifdef CONFIG_NUMA
6600 /*
6601 * init_sched_build_groups() can't handle what we want to do with node
6602 * groups, so roll our own. Each node gets its own list of groups, which
6603 * is dynamically allocated.
6604 */
6605 static DEFINE_PER_CPU(struct sched_domain, node_domains);
6606 static struct sched_group ***sched_group_nodes_bycpu;
6607
6608 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
6609 static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
6610
6611 static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
6612 struct sched_group **sg, cpumask_t *nodemask)
6613 {
6614 int group;
6615
6616 *nodemask = node_to_cpumask(cpu_to_node(cpu));
6617 cpus_and(*nodemask, *nodemask, *cpu_map);
6618 group = first_cpu(*nodemask);
6619
6620 if (sg)
6621 *sg = &per_cpu(sched_group_allnodes, group);
6622 return group;
6623 }
6624
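/*
 * Walk the circular list of node groups and accumulate cpu_power,
 * counting each physical package only once (only the first CPU of a
 * phys_domains group contributes).
 */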
6625 static void init_numa_sched_groups_power(struct sched_group *group_head)
6626 {
6627 struct sched_group *sg = group_head;
6628 int j;
6629
6630 if (!sg)
6631 return;
6632 do {
6633 for_each_cpu_mask(j, sg->cpumask) {
6634 struct sched_domain *sd;
6635
6636 sd = &per_cpu(phys_domains, j);
6637 if (j != first_cpu(sd->groups->cpumask)) {
6638 /*
6639 * Only add "power" once for each
6640 * physical package.
6641 */
6642 continue;
6643 }
6644
6645 sg_inc_cpu_power(sg, sd->groups->__cpu_power);
6646 }
6647 sg = sg->next;
6648 } while (sg != group_head);
6649 }
6650 #endif
6651
6652 #ifdef CONFIG_NUMA
6653 /* Free memory allocated for various sched_group structures */
6654 static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
6655 {
6656 int cpu, i;
6657
6658 for_each_cpu_mask(cpu, *cpu_map) {
6659 struct sched_group **sched_group_nodes
6660 = sched_group_nodes_bycpu[cpu];
6661
6662 if (!sched_group_nodes)
6663 continue;
6664
6665 for (i = 0; i < MAX_NUMNODES; i++) {
6666 struct sched_group *oldsg, *sg = sched_group_nodes[i];
6667
6668 *nodemask = node_to_cpumask(i);
6669 cpus_and(*nodemask, *nodemask, *cpu_map);
6670 if (cpus_empty(*nodemask))
6671 continue;
6672
6673 if (sg == NULL)
6674 continue;
6675 sg = sg->next;
6676 next_sg:
6677 oldsg = sg;
6678 sg = sg->next;
6679 kfree(oldsg);
6680 if (oldsg != sched_group_nodes[i])
6681 goto next_sg;
6682 }
6683 kfree(sched_group_nodes);
6684 sched_group_nodes_bycpu[cpu] = NULL;
6685 }
6686 }
6687 #else
6688 static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
6689 {
6690 }
6691 #endif
6692
6693 /*
6694 * Initialize sched groups cpu_power.
6695 *
6696 * cpu_power indicates the capacity of a sched group, which is used while
6697 * distributing the load between the sched groups in a sched domain.
6698 * Typically cpu_power is the same for all groups in a sched domain unless
6699 * there are asymmetries in the topology. If there are asymmetries, the group
6700 * with more cpu_power will pick up more load than the group with less
6701 * cpu_power.
6702 *
6703 * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
6704 * the maximum number of tasks a group can handle in the presence of other idle
6705 * or lightly loaded groups in the same sched domain.
6706 */
6707 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6708 {
6709 struct sched_domain *child;
6710 struct sched_group *group;
6711
6712 WARN_ON(!sd || !sd->groups);
6713
6714 if (cpu != first_cpu(sd->groups->cpumask))
6715 return;
6716
6717 child = sd->child;
6718
6719 sd->groups->__cpu_power = 0;
6720
6721 /*
6722 * For the performance policy, if the groups in the child domain share
6723 * resources (for example cores sharing parts of the cache hierarchy,
6724 * or SMT), then set this domain's group cpu_power such that each group
6725 * can handle only one task when there are other idle groups in the
6726 * same sched domain.
6727 */
6728 if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
6729 (child->flags &
6730 (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
6731 sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
6732 return;
6733 }
6734
6735 /*
6736 * Add the cpu_power of each child group to this group's cpu_power.
6737 */
6738 group = child->groups;
6739 do {
6740 sg_inc_cpu_power(sd->groups, group->__cpu_power);
6741 group = group->next;
6742 } while (group != child->groups);
6743 }
6744
6745 /*
6746 * Initializers for schedule domains
6747 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6748 */
6749
6750 #define SD_INIT(sd, type) sd_init_##type(sd)
6751 #define SD_INIT_FUNC(type) \
6752 static noinline void sd_init_##type(struct sched_domain *sd) \
6753 { \
6754 memset(sd, 0, sizeof(*sd)); \
6755 *sd = SD_##type##_INIT; \
6756 }
6757
6758 SD_INIT_FUNC(CPU)
6759 #ifdef CONFIG_NUMA
6760 SD_INIT_FUNC(ALLNODES)
6761 SD_INIT_FUNC(NODE)
6762 #endif
6763 #ifdef CONFIG_SCHED_SMT
6764 SD_INIT_FUNC(SIBLING)
6765 #endif
6766 #ifdef CONFIG_SCHED_MC
6767 SD_INIT_FUNC(MC)
6768 #endif
6769
6770 /*
6771 * To minimize stack usage, kmalloc room for the cpumasks and share the
6772 * space as the usage in build_sched_domains() dictates. Used only
6773 * if the amount of space is significant.
6774 */
6775 struct allmasks {
6776 cpumask_t tmpmask; /* make this one first */
6777 union {
6778 cpumask_t nodemask;
6779 cpumask_t this_sibling_map;
6780 cpumask_t this_core_map;
6781 };
6782 cpumask_t send_covered;
6783
6784 #ifdef CONFIG_NUMA
6785 cpumask_t domainspan;
6786 cpumask_t covered;
6787 cpumask_t notcovered;
6788 #endif
6789 };
6790
6791 #if NR_CPUS > 128
6792 #define SCHED_CPUMASK_ALLOC 1
6793 #define SCHED_CPUMASK_FREE(v) kfree(v)
6794 #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v
6795 #else
6796 #define SCHED_CPUMASK_ALLOC 0
6797 #define SCHED_CPUMASK_FREE(v)
6798 #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v
6799 #endif
6800
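/*
 * SCHED_CPUMASK_VAR(v, a) declares a local cpumask_t *v that points at
 * the member named 'v' inside the struct allmasks instance 'a'.
 */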
6801 #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
6802 ((unsigned long)(a) + offsetof(struct allmasks, v))
6803
6804 /*
6805 * Build sched domains for a given set of cpus and attach the sched domains
6806 * to the individual cpus
6807 */
6808 static int build_sched_domains(const cpumask_t *cpu_map)
6809 {
6810 int i;
6811 struct root_domain *rd;
6812 SCHED_CPUMASK_DECLARE(allmasks);
6813 cpumask_t *tmpmask;
6814 #ifdef CONFIG_NUMA
6815 struct sched_group **sched_group_nodes = NULL;
6816 int sd_allnodes = 0;
6817
6818 /*
6819 * Allocate the per-node list of sched groups
6820 */
6821 sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
6822 GFP_KERNEL);
6823 if (!sched_group_nodes) {
6824 printk(KERN_WARNING "Can not alloc sched group node list\n");
6825 return -ENOMEM;
6826 }
6827 #endif
6828
6829 rd = alloc_rootdomain();
6830 if (!rd) {
6831 printk(KERN_WARNING "Cannot alloc root domain\n");
6832 #ifdef CONFIG_NUMA
6833 kfree(sched_group_nodes);
6834 #endif
6835 return -ENOMEM;
6836 }
6837
6838 #if SCHED_CPUMASK_ALLOC
6839 /* get space for all scratch cpumask variables */
6840 allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
6841 if (!allmasks) {
6842 printk(KERN_WARNING "Cannot alloc cpumask array\n");
6843 kfree(rd);
6844 #ifdef CONFIG_NUMA
6845 kfree(sched_group_nodes);
6846 #endif
6847 return -ENOMEM;
6848 }
6849 #endif
6850 tmpmask = (cpumask_t *)allmasks;
6851
6852
6853 #ifdef CONFIG_NUMA
6854 sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
6855 #endif
6856
6857 /*
6858 * Set up domains for cpus specified by the cpu_map.
6859 */
6860 for_each_cpu_mask(i, *cpu_map) {
6861 struct sched_domain *sd = NULL, *p;
6862 SCHED_CPUMASK_VAR(nodemask, allmasks);
6863
6864 *nodemask = node_to_cpumask(cpu_to_node(i));
6865 cpus_and(*nodemask, *nodemask, *cpu_map);
6866
6867 #ifdef CONFIG_NUMA
6868 if (cpus_weight(*cpu_map) >
6869 SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
6870 sd = &per_cpu(allnodes_domains, i);
6871 SD_INIT(sd, ALLNODES);
6872 sd->span = *cpu_map;
6873 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
6874 p = sd;
6875 sd_allnodes = 1;
6876 } else
6877 p = NULL;
6878
6879 sd = &per_cpu(node_domains, i);
6880 SD_INIT(sd, NODE);
6881 sched_domain_node_span(cpu_to_node(i), &sd->span);
6882 sd->parent = p;
6883 if (p)
6884 p->child = sd;
6885 cpus_and(sd->span, sd->span, *cpu_map);
6886 #endif
6887
6888 p = sd;
6889 sd = &per_cpu(phys_domains, i);
6890 SD_INIT(sd, CPU);
6891 sd->span = *nodemask;
6892 sd->parent = p;
6893 if (p)
6894 p->child = sd;
6895 cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
6896
6897 #ifdef CONFIG_SCHED_MC
6898 p = sd;
6899 sd = &per_cpu(core_domains, i);
6900 SD_INIT(sd, MC);
6901 sd->span = cpu_coregroup_map(i);
6902 cpus_and(sd->span, sd->span, *cpu_map);
6903 sd->parent = p;
6904 p->child = sd;
6905 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
6906 #endif
6907
6908 #ifdef CONFIG_SCHED_SMT
6909 p = sd;
6910 sd = &per_cpu(cpu_domains, i);
6911 SD_INIT(sd, SIBLING);
6912 sd->span = per_cpu(cpu_sibling_map, i);
6913 cpus_and(sd->span, sd->span, *cpu_map);
6914 sd->parent = p;
6915 p->child = sd;
6916 cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
6917 #endif
6918 }
6919
6920 #ifdef CONFIG_SCHED_SMT
6921 /* Set up CPU (sibling) groups */
6922 for_each_cpu_mask(i, *cpu_map) {
6923 SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
6924 SCHED_CPUMASK_VAR(send_covered, allmasks);
6925
6926 *this_sibling_map = per_cpu(cpu_sibling_map, i);
6927 cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
6928 if (i != first_cpu(*this_sibling_map))
6929 continue;
6930
6931 init_sched_build_groups(this_sibling_map, cpu_map,
6932 &cpu_to_cpu_group,
6933 send_covered, tmpmask);
6934 }
6935 #endif
6936
6937 #ifdef CONFIG_SCHED_MC
6938 /* Set up multi-core groups */
6939 for_each_cpu_mask(i, *cpu_map) {
6940 SCHED_CPUMASK_VAR(this_core_map, allmasks);
6941 SCHED_CPUMASK_VAR(send_covered, allmasks);
6942
6943 *this_core_map = cpu_coregroup_map(i);
6944 cpus_and(*this_core_map, *this_core_map, *cpu_map);
6945 if (i != first_cpu(*this_core_map))
6946 continue;
6947
6948 init_sched_build_groups(this_core_map, cpu_map,
6949 &cpu_to_core_group,
6950 send_covered, tmpmask);
6951 }
6952 #endif
6953
6954 /* Set up physical groups */
6955 for (i = 0; i < MAX_NUMNODES; i++) {
6956 SCHED_CPUMASK_VAR(nodemask, allmasks);
6957 SCHED_CPUMASK_VAR(send_covered, allmasks);
6958
6959 *nodemask = node_to_cpumask(i);
6960 cpus_and(*nodemask, *nodemask, *cpu_map);
6961 if (cpus_empty(*nodemask))
6962 continue;
6963
6964 init_sched_build_groups(nodemask, cpu_map,
6965 &cpu_to_phys_group,
6966 send_covered, tmpmask);
6967 }
6968
6969 #ifdef CONFIG_NUMA
6970 /* Set up node groups */
6971 if (sd_allnodes) {
6972 SCHED_CPUMASK_VAR(send_covered, allmasks);
6973
6974 init_sched_build_groups(cpu_map, cpu_map,
6975 &cpu_to_allnodes_group,
6976 send_covered, tmpmask);
6977 }
6978
6979 for (i = 0; i < MAX_NUMNODES; i++) {
6980 /* Set up node groups */
6981 struct sched_group *sg, *prev;
6982 SCHED_CPUMASK_VAR(nodemask, allmasks);
6983 SCHED_CPUMASK_VAR(domainspan, allmasks);
6984 SCHED_CPUMASK_VAR(covered, allmasks);
6985 int j;
6986
6987 *nodemask = node_to_cpumask(i);
6988 cpus_clear(*covered);
6989
6990 cpus_and(*nodemask, *nodemask, *cpu_map);
6991 if (cpus_empty(*nodemask)) {
6992 sched_group_nodes[i] = NULL;
6993 continue;
6994 }
6995
6996 sched_domain_node_span(i, domainspan);
6997 cpus_and(*domainspan, *domainspan, *cpu_map);
6998
6999 sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
7000 if (!sg) {
7001 printk(KERN_WARNING "Can not alloc domain group for "
7002 "node %d\n", i);
7003 goto error;
7004 }
7005 sched_group_nodes[i] = sg;
7006 for_each_cpu_mask(j, *nodemask) {
7007 struct sched_domain *sd;
7008
7009 sd = &per_cpu(node_domains, j);
7010 sd->groups = sg;
7011 }
7012 sg->__cpu_power = 0;
7013 sg->cpumask = *nodemask;
7014 sg->next = sg;
7015 cpus_or(*covered, *covered, *nodemask);
7016 prev = sg;
7017
7018 for (j = 0; j < MAX_NUMNODES; j++) {
7019 SCHED_CPUMASK_VAR(notcovered, allmasks);
7020 int n = (i + j) % MAX_NUMNODES;
7021 node_to_cpumask_ptr(pnodemask, n);
7022
7023 cpus_complement(*notcovered, *covered);
7024 cpus_and(*tmpmask, *notcovered, *cpu_map);
7025 cpus_and(*tmpmask, *tmpmask, *domainspan);
7026 if (cpus_empty(*tmpmask))
7027 break;
7028
7029 cpus_and(*tmpmask, *tmpmask, *pnodemask);
7030 if (cpus_empty(*tmpmask))
7031 continue;
7032
7033 sg = kmalloc_node(sizeof(struct sched_group),
7034 GFP_KERNEL, i);
7035 if (!sg) {
7036 printk(KERN_WARNING
7037 "Can not alloc domain group for node %d\n", j);
7038 goto error;
7039 }
7040 sg->__cpu_power = 0;
7041 sg->cpumask = *tmpmask;
7042 sg->next = prev->next;
7043 cpus_or(*covered, *covered, *tmpmask);
7044 prev->next = sg;
7045 prev = sg;
7046 }
7047 }
7048 #endif
7049
7050 /* Calculate CPU power for physical packages and nodes */
7051 #ifdef CONFIG_SCHED_SMT
7052 for_each_cpu_mask(i, *cpu_map) {
7053 struct sched_domain *sd = &per_cpu(cpu_domains, i);
7054
7055 init_sched_groups_power(i, sd);
7056 }
7057 #endif
7058 #ifdef CONFIG_SCHED_MC
7059 for_each_cpu_mask(i, *cpu_map) {
7060 struct sched_domain *sd = &per_cpu(core_domains, i);
7061
7062 init_sched_groups_power(i, sd);
7063 }
7064 #endif
7065
7066 for_each_cpu_mask(i, *cpu_map) {
7067 struct sched_domain *sd = &per_cpu(phys_domains, i);
7068
7069 init_sched_groups_power(i, sd);
7070 }
7071
7072 #ifdef CONFIG_NUMA
7073 for (i = 0; i < MAX_NUMNODES; i++)
7074 init_numa_sched_groups_power(sched_group_nodes[i]);
7075
7076 if (sd_allnodes) {
7077 struct sched_group *sg;
7078
7079 cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg,
7080 tmpmask);
7081 init_numa_sched_groups_power(sg);
7082 }
7083 #endif
7084
7085 /* Attach the domains */
7086 for_each_cpu_mask(i, *cpu_map) {
7087 struct sched_domain *sd;
7088 #ifdef CONFIG_SCHED_SMT
7089 sd = &per_cpu(cpu_domains, i);
7090 #elif defined(CONFIG_SCHED_MC)
7091 sd = &per_cpu(core_domains, i);
7092 #else
7093 sd = &per_cpu(phys_domains, i);
7094 #endif
7095 cpu_attach_domain(sd, rd, i);
7096 }
7097
7098 SCHED_CPUMASK_FREE((void *)allmasks);
7099 return 0;
7100
7101 #ifdef CONFIG_NUMA
7102 error:
7103 free_sched_groups(cpu_map, tmpmask);
7104 SCHED_CPUMASK_FREE((void *)allmasks);
7105 return -ENOMEM;
7106 #endif
7107 }
7108
7109 static cpumask_t *doms_cur; /* current sched domains */
7110 static int ndoms_cur; /* number of sched domains in 'doms_cur' */
7111
7112 /*
7113 * Special case: If a kmalloc of a doms_cur partition (array of
7114 * cpumask_t) fails, then fallback to a single sched domain,
7115 * as determined by the single cpumask_t fallback_doms.
7116 */
7117 static cpumask_t fallback_doms;
7118
7119 void __attribute__((weak)) arch_update_cpu_topology(void)
7120 {
7121 }
7122
7123 /*
7124 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
7125 * For now this just excludes isolated cpus, but could be used to
7126 * exclude other special cases in the future.
7127 */
7128 static int arch_init_sched_domains(const cpumask_t *cpu_map)
7129 {
7130 int err;
7131
7132 arch_update_cpu_topology();
7133 ndoms_cur = 1;
7134 doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
7135 if (!doms_cur)
7136 doms_cur = &fallback_doms;
7137 cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
7138 err = build_sched_domains(doms_cur);
7139 register_sched_domain_sysctl();
7140
7141 return err;
7142 }
7143
7144 static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
7145 cpumask_t *tmpmask)
7146 {
7147 free_sched_groups(cpu_map, tmpmask);
7148 }
7149
7150 /*
7151 * Detach sched domains from a group of cpus specified in cpu_map
7152 * These cpus will now be attached to the NULL domain
7153 */
7154 static void detach_destroy_domains(const cpumask_t *cpu_map)
7155 {
7156 cpumask_t tmpmask;
7157 int i;
7158
7159 unregister_sched_domain_sysctl();
7160
7161 for_each_cpu_mask(i, *cpu_map)
7162 cpu_attach_domain(NULL, &def_root_domain, i);
7163 synchronize_sched();
7164 arch_destroy_sched_domains(cpu_map, &tmpmask);
7165 }
7166
7167 /*
7168 * Partition sched domains as specified by the 'ndoms_new'
7169 * cpumasks in the array doms_new[] of cpumasks. This compares
7170 * doms_new[] to the current sched domain partitioning, doms_cur[].
7171 * It destroys each deleted domain and builds each new domain.
7172 *
7173 * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
7174 * The masks must not intersect (must not overlap); we set up one
7175 * sched domain for each mask. CPUs not in any of the cpumasks will
7176 * not be load balanced. If the same cpumask appears both in the
7177 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7178 * it as it is.
7179 *
7180 * The passed-in 'doms_new' should be kmalloc'd. This routine takes
7181 * ownership of it and will kfree it when done with it. If the caller's
7182 * kmalloc call failed, then it can pass in doms_new == NULL,
7183 * and partition_sched_domains() will fall back to the single partition
7184 * 'fallback_doms'.
7185 *
7186 * Call with hotplug lock held
7187 */
7188 void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
7189 {
7190 int i, j;
7191
7192 lock_doms_cur();
7193
7194 /* always unregister in case we don't destroy any domains */
7195 unregister_sched_domain_sysctl();
7196
7197 if (doms_new == NULL) {
7198 ndoms_new = 1;
7199 doms_new = &fallback_doms;
7200 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
7201 }
7202
7203 /* Destroy deleted domains */
7204 for (i = 0; i < ndoms_cur; i++) {
7205 for (j = 0; j < ndoms_new; j++) {
7206 if (cpus_equal(doms_cur[i], doms_new[j]))
7207 goto match1;
7208 }
7209 /* no match - a current sched domain not in new doms_new[] */
7210 detach_destroy_domains(doms_cur + i);
7211 match1:
7212 ;
7213 }
7214
7215 /* Build new domains */
7216 for (i = 0; i < ndoms_new; i++) {
7217 for (j = 0; j < ndoms_cur; j++) {
7218 if (cpus_equal(doms_new[i], doms_cur[j]))
7219 goto match2;
7220 }
7221 /* no match - add a new doms_new */
7222 build_sched_domains(doms_new + i);
7223 match2:
7224 ;
7225 }
7226
7227 /* Remember the new sched domains */
7228 if (doms_cur != &fallback_doms)
7229 kfree(doms_cur);
7230 doms_cur = doms_new;
7231 ndoms_cur = ndoms_new;
7232
7233 register_sched_domain_sysctl();
7234
7235 unlock_doms_cur();
7236 }
7237
7238 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
7239 int arch_reinit_sched_domains(void)
7240 {
7241 int err;
7242
7243 get_online_cpus();
7244 detach_destroy_domains(&cpu_online_map);
7245 err = arch_init_sched_domains(&cpu_online_map);
7246 put_online_cpus();
7247
7248 return err;
7249 }
7250
7251 static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7252 {
7253 int ret;
7254
7255 if (buf[0] != '0' && buf[0] != '1')
7256 return -EINVAL;
7257
7258 if (smt)
7259 sched_smt_power_savings = (buf[0] == '1');
7260 else
7261 sched_mc_power_savings = (buf[0] == '1');
7262
7263 ret = arch_reinit_sched_domains();
7264
7265 return ret ? ret : count;
7266 }
7267
7268 #ifdef CONFIG_SCHED_MC
7269 static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
7270 {
7271 return sprintf(page, "%u\n", sched_mc_power_savings);
7272 }
7273 static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
7274 const char *buf, size_t count)
7275 {
7276 return sched_power_savings_store(buf, count, 0);
7277 }
7278 static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
7279 sched_mc_power_savings_store);
7280 #endif
7281
7282 #ifdef CONFIG_SCHED_SMT
7283 static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
7284 {
7285 return sprintf(page, "%u\n", sched_smt_power_savings);
7286 }
7287 static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
7288 const char *buf, size_t count)
7289 {
7290 return sched_power_savings_store(buf, count, 1);
7291 }
7292 static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
7293 sched_smt_power_savings_store);
7294 #endif
7295
7296 int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
7297 {
7298 int err = 0;
7299
7300 #ifdef CONFIG_SCHED_SMT
7301 if (smt_capable())
7302 err = sysfs_create_file(&cls->kset.kobj,
7303 &attr_sched_smt_power_savings.attr);
7304 #endif
7305 #ifdef CONFIG_SCHED_MC
7306 if (!err && mc_capable())
7307 err = sysfs_create_file(&cls->kset.kobj,
7308 &attr_sched_mc_power_savings.attr);
7309 #endif
7310 return err;
7311 }
7312 #endif
7313
7314 /*
7315 * Force a reinitialization of the sched domains hierarchy. The domains
7316 * and groups cannot be updated in place without racing with the balancing
7317 * code, so we temporarily attach all running cpus to the NULL domain
7318 * which will prevent rebalancing while the sched domains are recalculated.
7319 */
7320 static int update_sched_domains(struct notifier_block *nfb,
7321 unsigned long action, void *hcpu)
7322 {
7323 switch (action) {
7324 case CPU_UP_PREPARE:
7325 case CPU_UP_PREPARE_FROZEN:
7326 case CPU_DOWN_PREPARE:
7327 case CPU_DOWN_PREPARE_FROZEN:
7328 detach_destroy_domains(&cpu_online_map);
7329 return NOTIFY_OK;
7330
7331 case CPU_UP_CANCELED:
7332 case CPU_UP_CANCELED_FROZEN:
7333 case CPU_DOWN_FAILED:
7334 case CPU_DOWN_FAILED_FROZEN:
7335 case CPU_ONLINE:
7336 case CPU_ONLINE_FROZEN:
7337 case CPU_DEAD:
7338 case CPU_DEAD_FROZEN:
7339 /*
7340 * Fall through and re-initialise the domains.
7341 */
7342 break;
7343 default:
7344 return NOTIFY_DONE;
7345 }
7346
7347 /* The hotplug lock is already held by cpu_up/cpu_down */
7348 arch_init_sched_domains(&cpu_online_map);
7349
7350 return NOTIFY_OK;
7351 }
7352
7353 void __init sched_init_smp(void)
7354 {
7355 cpumask_t non_isolated_cpus;
7356
7357 #if defined(CONFIG_NUMA)
7358 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7359 GFP_KERNEL);
7360 BUG_ON(sched_group_nodes_bycpu == NULL);
7361 #endif
7362 get_online_cpus();
7363 arch_init_sched_domains(&cpu_online_map);
7364 cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
7365 if (cpus_empty(non_isolated_cpus))
7366 cpu_set(smp_processor_id(), non_isolated_cpus);
7367 put_online_cpus();
7368 /* XXX: Theoretical race here - CPU may be hotplugged now */
7369 hotcpu_notifier(update_sched_domains, 0);
7370
7371 /* Move init over to a non-isolated CPU */
7372 if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
7373 BUG();
7374 sched_init_granularity();
7375 }
7376 #else
7377 void __init sched_init_smp(void)
7378 {
7379 #if defined(CONFIG_NUMA)
7380 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7381 GFP_KERNEL);
7382 BUG_ON(sched_group_nodes_bycpu == NULL);
7383 #endif
7384 sched_init_granularity();
7385 }
7386 #endif /* CONFIG_SMP */
7387
7388 int in_sched_functions(unsigned long addr)
7389 {
7390 return in_lock_functions(addr) ||
7391 (addr >= (unsigned long)__sched_text_start
7392 && addr < (unsigned long)__sched_text_end);
7393 }
7394
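/*
 * min_vruntime is initialised to a value just below the u64 wrap point
 * (presumably so that wrap-around arithmetic on vruntime is exercised
 * early rather than only after long uptimes).
 */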
7395 static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
7396 {
7397 cfs_rq->tasks_timeline = RB_ROOT;
7398 #ifdef CONFIG_FAIR_GROUP_SCHED
7399 cfs_rq->rq = rq;
7400 #endif
7401 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7402 }
7403
7404 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7405 {
7406 struct rt_prio_array *array;
7407 int i;
7408
7409 array = &rt_rq->active;
7410 for (i = 0; i < MAX_RT_PRIO; i++) {
7411 INIT_LIST_HEAD(array->queue + i);
7412 __clear_bit(i, array->bitmap);
7413 }
7414 /* delimiter for bitsearch: */
7415 __set_bit(MAX_RT_PRIO, array->bitmap);
7416
7417 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
7418 rt_rq->highest_prio = MAX_RT_PRIO;
7419 #endif
7420 #ifdef CONFIG_SMP
7421 rt_rq->rt_nr_migratory = 0;
7422 rt_rq->overloaded = 0;
7423 #endif
7424
7425 rt_rq->rt_time = 0;
7426 rt_rq->rt_throttled = 0;
7427 rt_rq->rt_runtime = 0;
7428 spin_lock_init(&rt_rq->rt_runtime_lock);
7429
7430 #ifdef CONFIG_RT_GROUP_SCHED
7431 rt_rq->rt_nr_boosted = 0;
7432 rt_rq->rq = rq;
7433 #endif
7434 }
7435
7436 #ifdef CONFIG_FAIR_GROUP_SCHED
7437 static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
7438 struct cfs_rq *cfs_rq, struct sched_entity *se,
7439 int cpu, int add)
7440 {
7441 tg->cfs_rq[cpu] = cfs_rq;
7442 init_cfs_rq(cfs_rq, rq);
7443 cfs_rq->tg = tg;
7444 if (add)
7445 list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
7446
7447 tg->se[cpu] = se;
7448 se->cfs_rq = &rq->cfs;
7449 se->my_q = cfs_rq;
7450 se->load.weight = tg->shares;
7451 se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
7452 se->parent = NULL;
7453 }
7454 #endif
7455
7456 #ifdef CONFIG_RT_GROUP_SCHED
7457 static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
7458 struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
7459 int cpu, int add)
7460 {
7461 tg->rt_rq[cpu] = rt_rq;
7462 init_rt_rq(rt_rq, rq);
7463 rt_rq->tg = tg;
7464 rt_rq->rt_se = rt_se;
7465 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
7466 if (add)
7467 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
7468
7469 tg->rt_se[cpu] = rt_se;
7470 rt_se->rt_rq = &rq->rt;
7471 rt_se->my_q = rt_rq;
7472 rt_se->parent = NULL;
7473 INIT_LIST_HEAD(&rt_se->run_list);
7474 }
7475 #endif
7476
7477 void __init sched_init(void)
7478 {
7479 int i, j;
7480 unsigned long alloc_size = 0, ptr;
7481
7482 #ifdef CONFIG_FAIR_GROUP_SCHED
7483 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7484 #endif
7485 #ifdef CONFIG_RT_GROUP_SCHED
7486 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7487 #endif
7488 /*
7489 * As sched_init() is called before the page allocator is set up,
7490 * we use alloc_bootmem().
7491 */
7492 if (alloc_size) {
7493 ptr = (unsigned long)alloc_bootmem_low(alloc_size);
7494
7495 #ifdef CONFIG_FAIR_GROUP_SCHED
7496 init_task_group.se = (struct sched_entity **)ptr;
7497 ptr += nr_cpu_ids * sizeof(void **);
7498
7499 init_task_group.cfs_rq = (struct cfs_rq **)ptr;
7500 ptr += nr_cpu_ids * sizeof(void **);
7501 #endif
7502 #ifdef CONFIG_RT_GROUP_SCHED
7503 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
7504 ptr += nr_cpu_ids * sizeof(void **);
7505
7506 init_task_group.rt_rq = (struct rt_rq **)ptr;
7507 #endif
7508 }
7509
7510 #ifdef CONFIG_SMP
7511 init_defrootdomain();
7512 #endif
7513
7514 init_rt_bandwidth(&def_rt_bandwidth,
7515 global_rt_period(), global_rt_runtime());
7516
7517 #ifdef CONFIG_RT_GROUP_SCHED
7518 init_rt_bandwidth(&init_task_group.rt_bandwidth,
7519 global_rt_period(), global_rt_runtime());
7520 #endif
7521
7522 #ifdef CONFIG_GROUP_SCHED
7523 list_add(&init_task_group.list, &task_groups);
7524 #endif
7525
7526 for_each_possible_cpu(i) {
7527 struct rq *rq;
7528
7529 rq = cpu_rq(i);
7530 spin_lock_init(&rq->lock);
7531 lockdep_set_class(&rq->lock, &rq->rq_lock_key);
7532 rq->nr_running = 0;
7533 rq->clock = 1;
7534 update_last_tick_seen(rq);
7535 init_cfs_rq(&rq->cfs, rq);
7536 init_rt_rq(&rq->rt, rq);
7537 #ifdef CONFIG_FAIR_GROUP_SCHED
7538 init_task_group.shares = init_task_group_load;
7539 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
7540 init_tg_cfs_entry(rq, &init_task_group,
7541 &per_cpu(init_cfs_rq, i),
7542 &per_cpu(init_sched_entity, i), i, 1);
7543
7544 #endif
7545 #ifdef CONFIG_RT_GROUP_SCHED
7546 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
7547 init_tg_rt_entry(rq, &init_task_group,
7548 &per_cpu(init_rt_rq, i),
7549 &per_cpu(init_sched_rt_entity, i), i, 1);
7550 #else
7551 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7552 #endif
7553
7554 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7555 rq->cpu_load[j] = 0;
7556 #ifdef CONFIG_SMP
7557 rq->sd = NULL;
7558 rq->rd = NULL;
7559 rq->active_balance = 0;
7560 rq->next_balance = jiffies;
7561 rq->push_cpu = 0;
7562 rq->cpu = i;
7563 rq->migration_thread = NULL;
7564 INIT_LIST_HEAD(&rq->migration_queue);
7565 rq_attach_root(rq, &def_root_domain);
7566 #endif
7567 init_rq_hrtick(rq);
7568 atomic_set(&rq->nr_iowait, 0);
7569 }
7570
7571 set_load_weight(&init_task);
7572
7573 #ifdef CONFIG_PREEMPT_NOTIFIERS
7574 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7575 #endif
7576
7577 #ifdef CONFIG_SMP
7578 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
7579 #endif
7580
7581 #ifdef CONFIG_RT_MUTEXES
7582 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
7583 #endif
7584
7585 /*
7586 * The boot idle thread does lazy MMU switching as well:
7587 */
7588 atomic_inc(&init_mm.mm_count);
7589 enter_lazy_tlb(&init_mm, current);
7590
7591 /*
7592 * Make us the idle thread. Technically, schedule() should not be
7593 * called from this thread, however somewhere below it might be,
7594 * but because we are the idle thread, we just pick up running again
7595 * when this runqueue becomes "idle".
7596 */
7597 init_idle(current, smp_processor_id());
7598 /*
7599 * During early bootup we pretend to be a normal task:
7600 */
7601 current->sched_class = &fair_sched_class;
7602
7603 scheduler_running = 1;
7604 }
7605
7606 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
7607 void __might_sleep(char *file, int line)
7608 {
7609 #ifdef in_atomic
7610 static unsigned long prev_jiffy; /* ratelimiting */
7611
7612 if ((in_atomic() || irqs_disabled()) &&
7613 system_state == SYSTEM_RUNNING && !oops_in_progress) {
7614 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7615 return;
7616 prev_jiffy = jiffies;
7617 printk(KERN_ERR "BUG: sleeping function called from invalid"
7618 " context at %s:%d\n", file, line);
7619 printk("in_atomic():%d, irqs_disabled():%d\n",
7620 in_atomic(), irqs_disabled());
7621 debug_show_held_locks(current);
7622 if (irqs_disabled())
7623 print_irqtrace_events(current);
7624 dump_stack();
7625 }
7626 #endif
7627 }
7628 EXPORT_SYMBOL(__might_sleep);
7629 #endif
7630
7631 #ifdef CONFIG_MAGIC_SYSRQ
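/*
 * Reset a task to SCHED_NORMAL: dequeue it if it is on a runqueue,
 * reset its scheduling policy, then re-enqueue it and force the
 * current task on that runqueue to reschedule.
 */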
7632 static void normalize_task(struct rq *rq, struct task_struct *p)
7633 {
7634 int on_rq;
7635 update_rq_clock(rq);
7636 on_rq = p->se.on_rq;
7637 if (on_rq)
7638 deactivate_task(rq, p, 0);
7639 __setscheduler(rq, p, SCHED_NORMAL, 0);
7640 if (on_rq) {
7641 activate_task(rq, p, 0);
7642 resched_task(rq->curr);
7643 }
7644 }
7645
7646 void normalize_rt_tasks(void)
7647 {
7648 struct task_struct *g, *p;
7649 unsigned long flags;
7650 struct rq *rq;
7651
7652 read_lock_irqsave(&tasklist_lock, flags);
7653 do_each_thread(g, p) {
7654 /*
7655 * Only normalize user tasks:
7656 */
7657 if (!p->mm)
7658 continue;
7659
7660 p->se.exec_start = 0;
7661 #ifdef CONFIG_SCHEDSTATS
7662 p->se.wait_start = 0;
7663 p->se.sleep_start = 0;
7664 p->se.block_start = 0;
7665 #endif
7666 task_rq(p)->clock = 0;
7667
7668 if (!rt_task(p)) {
7669 /*
7670 * Renice negative nice level userspace
7671 * tasks back to 0:
7672 */
7673 if (TASK_NICE(p) < 0 && p->mm)
7674 set_user_nice(p, 0);
7675 continue;
7676 }
7677
7678 spin_lock(&p->pi_lock);
7679 rq = __task_rq_lock(p);
7680
7681 normalize_task(rq, p);
7682
7683 __task_rq_unlock(rq);
7684 spin_unlock(&p->pi_lock);
7685 } while_each_thread(g, p);
7686
7687 read_unlock_irqrestore(&tasklist_lock, flags);
7688 }
7689
7690 #endif /* CONFIG_MAGIC_SYSRQ */
7691
7692 #ifdef CONFIG_IA64
7693 /*
7694 * These functions are only useful for the IA64 MCA handling.
7695 *
7696 * They can only be called when the whole system has been
7697 * stopped - every CPU needs to be quiescent, and no scheduling
7698 * activity can take place. Using them for anything else would
7699 * be a serious bug, and as a result, they aren't even visible
7700 * under any other configuration.
7701 */
7702
7703 /**
7704 * curr_task - return the current task for a given cpu.
7705 * @cpu: the processor in question.
7706 *
7707 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7708 */
7709 struct task_struct *curr_task(int cpu)
7710 {
7711 return cpu_curr(cpu);
7712 }
7713
7714 /**
7715 * set_curr_task - set the current task for a given cpu.
7716 * @cpu: the processor in question.
7717 * @p: the task pointer to set.
7718 *
7719 * Description: This function must only be used when non-maskable interrupts
7720 * are serviced on a separate stack. It allows the architecture to switch the
7721 * notion of the current task on a cpu in a non-blocking manner. This function
7722 * must be called with all CPUs synchronized and interrupts disabled; the
7723 * caller must save the original value of the current task (see
7724 * curr_task() above) and restore that value before reenabling interrupts and
7725 * re-starting the system.
7726 *
7727 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7728 */
7729 void set_curr_task(int cpu, struct task_struct *p)
7730 {
7731 cpu_curr(cpu) = p;
7732 }
7733
7734 #endif
7735
7736 #ifdef CONFIG_FAIR_GROUP_SCHED
7737 static void free_fair_sched_group(struct task_group *tg)
7738 {
7739 int i;
7740
7741 for_each_possible_cpu(i) {
7742 if (tg->cfs_rq)
7743 kfree(tg->cfs_rq[i]);
7744 if (tg->se)
7745 kfree(tg->se[i]);
7746 }
7747
7748 kfree(tg->cfs_rq);
7749 kfree(tg->se);
7750 }
7751
7752 static int alloc_fair_sched_group(struct task_group *tg)
7753 {
7754 struct cfs_rq *cfs_rq;
7755 struct sched_entity *se;
7756 struct rq *rq;
7757 int i;
7758
7759 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
7760 if (!tg->cfs_rq)
7761 goto err;
7762 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
7763 if (!tg->se)
7764 goto err;
7765
7766 tg->shares = NICE_0_LOAD;
7767
7768 for_each_possible_cpu(i) {
7769 rq = cpu_rq(i);
7770
7771 cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
7772 GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
7773 if (!cfs_rq)
7774 goto err;
7775
7776 se = kmalloc_node(sizeof(struct sched_entity),
7777 GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
7778 if (!se)
7779 goto err;
7780
7781 init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
7782 }
7783
7784 return 1;
7785
7786 err:
7787 return 0;
7788 }
7789
7790 static inline void register_fair_sched_group(struct task_group *tg, int cpu)
7791 {
7792 list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
7793 &cpu_rq(cpu)->leaf_cfs_rq_list);
7794 }
7795
7796 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
7797 {
7798 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
7799 }
7800 #else
7801 static inline void free_fair_sched_group(struct task_group *tg)
7802 {
7803 }
7804
7805 static inline int alloc_fair_sched_group(struct task_group *tg)
7806 {
7807 return 1;
7808 }
7809
7810 static inline void register_fair_sched_group(struct task_group *tg, int cpu)
7811 {
7812 }
7813
7814 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
7815 {
7816 }
7817 #endif
7818
7819 #ifdef CONFIG_RT_GROUP_SCHED
7820 static void free_rt_sched_group(struct task_group *tg)
7821 {
7822 int i;
7823
7824 destroy_rt_bandwidth(&tg->rt_bandwidth);
7825
7826 for_each_possible_cpu(i) {
7827 if (tg->rt_rq)
7828 kfree(tg->rt_rq[i]);
7829 if (tg->rt_se)
7830 kfree(tg->rt_se[i]);
7831 }
7832
7833 kfree(tg->rt_rq);
7834 kfree(tg->rt_se);
7835 }
7836
7837 static int alloc_rt_sched_group(struct task_group *tg)
7838 {
7839 struct rt_rq *rt_rq;
7840 struct sched_rt_entity *rt_se;
7841 struct rq *rq;
7842 int i;
7843
7844 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
7845 if (!tg->rt_rq)
7846 goto err;
7847 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
7848 if (!tg->rt_se)
7849 goto err;
7850
7851 init_rt_bandwidth(&tg->rt_bandwidth,
7852 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
7853
7854 for_each_possible_cpu(i) {
7855 rq = cpu_rq(i);
7856
7857 rt_rq = kmalloc_node(sizeof(struct rt_rq),
7858 GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
7859 if (!rt_rq)
7860 goto err;
7861
7862 rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
7863 GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
7864 if (!rt_se)
7865 goto err;
7866
7867 init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
7868 }
7869
7870 return 1;
7871
7872 err:
7873 return 0;
7874 }
7875
7876 static inline void register_rt_sched_group(struct task_group *tg, int cpu)
7877 {
7878 list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
7879 &cpu_rq(cpu)->leaf_rt_rq_list);
7880 }
7881
7882 static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
7883 {
7884 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
7885 }
7886 #else
7887 static inline void free_rt_sched_group(struct task_group *tg)
7888 {
7889 }
7890
7891 static inline int alloc_rt_sched_group(struct task_group *tg)
7892 {
7893 return 1;
7894 }
7895
7896 static inline void register_rt_sched_group(struct task_group *tg, int cpu)
7897 {
7898 }
7899
7900 static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
7901 {
7902 }
7903 #endif
7904
7905 #ifdef CONFIG_GROUP_SCHED
7906 static void free_sched_group(struct task_group *tg)
7907 {
7908 free_fair_sched_group(tg);
7909 free_rt_sched_group(tg);
7910 kfree(tg);
7911 }
7912
7913 /* allocate runqueue etc for a new task group */
7914 struct task_group *sched_create_group(void)
7915 {
7916 struct task_group *tg;
7917 unsigned long flags;
7918 int i;
7919
7920 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7921 if (!tg)
7922 return ERR_PTR(-ENOMEM);
7923
7924 if (!alloc_fair_sched_group(tg))
7925 goto err;
7926
7927 if (!alloc_rt_sched_group(tg))
7928 goto err;
7929
7930 spin_lock_irqsave(&task_group_lock, flags);
7931 for_each_possible_cpu(i) {
7932 register_fair_sched_group(tg, i);
7933 register_rt_sched_group(tg, i);
7934 }
7935 list_add_rcu(&tg->list, &task_groups);
7936 spin_unlock_irqrestore(&task_group_lock, flags);
7937
7938 return tg;
7939
7940 err:
7941 free_sched_group(tg);
7942 return ERR_PTR(-ENOMEM);
7943 }
7944
7945 /* rcu callback to free various structures associated with a task group */
7946 static void free_sched_group_rcu(struct rcu_head *rhp)
7947 {
7948 /* now it should be safe to free those cfs_rqs */
7949 free_sched_group(container_of(rhp, struct task_group, rcu));
7950 }
7951
7952 /* Destroy runqueue etc associated with a task group */
7953 void sched_destroy_group(struct task_group *tg)
7954 {
7955 unsigned long flags;
7956 int i;
7957
7958 spin_lock_irqsave(&task_group_lock, flags);
7959 for_each_possible_cpu(i) {
7960 unregister_fair_sched_group(tg, i);
7961 unregister_rt_sched_group(tg, i);
7962 }
7963 list_del_rcu(&tg->list);
7964 spin_unlock_irqrestore(&task_group_lock, flags);
7965
7966 /* Wait for possible concurrent references to the cfs_rqs to complete. */
7967 call_rcu(&tg->rcu, free_sched_group_rcu);
7968 }
7969
7970 /* Change the task's runqueue when it moves between groups.
7971 * The caller of this function should have put the task in its new group
7972 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7973 * reflect its new group.
7974 */
7975 void sched_move_task(struct task_struct *tsk)
7976 {
7977 int on_rq, running;
7978 unsigned long flags;
7979 struct rq *rq;
7980
7981 rq = task_rq_lock(tsk, &flags);
7982
7983 update_rq_clock(rq);
7984
7985 running = task_current(rq, tsk);
7986 on_rq = tsk->se.on_rq;
7987
7988 if (on_rq)
7989 dequeue_task(rq, tsk, 0);
7990 if (unlikely(running))
7991 tsk->sched_class->put_prev_task(rq, tsk);
7992
7993 set_task_rq(tsk, task_cpu(tsk));
7994
7995 #ifdef CONFIG_FAIR_GROUP_SCHED
7996 if (tsk->sched_class->moved_group)
7997 tsk->sched_class->moved_group(tsk);
7998 #endif
7999
8000 if (unlikely(running))
8001 tsk->sched_class->set_curr_task(rq);
8002 if (on_rq)
8003 enqueue_task(rq, tsk, 0);
8004
8005 task_rq_unlock(rq, &flags);
8006 }
8007 #endif
8008
8009 #ifdef CONFIG_FAIR_GROUP_SCHED
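/*
 * Updating a group entity's weight must be done with the entity
 * dequeued so that the cfs_rq load tracking stays consistent: dequeue,
 * change the weight, then re-enqueue, all under rq->lock.
 */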
8010 static void set_se_shares(struct sched_entity *se, unsigned long shares)
8011 {
8012 struct cfs_rq *cfs_rq = se->cfs_rq;
8013 struct rq *rq = cfs_rq->rq;
8014 int on_rq;
8015
8016 spin_lock_irq(&rq->lock);
8017
8018 on_rq = se->on_rq;
8019 if (on_rq)
8020 dequeue_entity(cfs_rq, se, 0);
8021
8022 se->load.weight = shares;
8023 se->load.inv_weight = div64_64((1ULL<<32), shares);
8024
8025 if (on_rq)
8026 enqueue_entity(cfs_rq, se, 0);
8027
8028 spin_unlock_irq(&rq->lock);
8029 }
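
/*
 * Worked example of the weight update above: inv_weight caches
 * 2^32 / weight so that later divisions by the weight can be done as a
 * multiply plus shift.  For the default shares value of 1024:
 *
 *	inv_weight = (1ULL << 32) / 1024 = 4194304
 *
 * and for the minimum value accepted by sched_group_set_shares(), 2:
 *
 *	inv_weight = (1ULL << 32) / 2 = 2147483648
 */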
8030
8031 static DEFINE_MUTEX(shares_mutex);
8032
8033 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8034 {
8035 int i;
8036 unsigned long flags;
8037
8038 /*
8039 * A weight of 0 or 1 can cause arithmetic problems.
8040 * (The default weight is 1024 - so there's no practical
8041 * limitation from this.)
8042 */
8043 if (shares < 2)
8044 shares = 2;
8045
8046 mutex_lock(&shares_mutex);
8047 if (tg->shares == shares)
8048 goto done;
8049
8050 spin_lock_irqsave(&task_group_lock, flags);
8051 for_each_possible_cpu(i)
8052 unregister_fair_sched_group(tg, i);
8053 spin_unlock_irqrestore(&task_group_lock, flags);
8054
8055 /* wait for any ongoing reference to this group to finish */
8056 synchronize_sched();
8057
8058 /*
8059 * Now we are free to modify the group's share on each cpu
8060 * w/o tripping rebalance_share or load_balance_fair.
8061 */
8062 tg->shares = shares;
8063 for_each_possible_cpu(i)
8064 set_se_shares(tg->se[i], shares);
8065
8066 /*
8067 * Enable load-balance activity on this group by inserting it back on
8068 * each cpu's rq->leaf_cfs_rq_list.
8069 */
8070 spin_lock_irqsave(&task_group_lock, flags);
8071 for_each_possible_cpu(i)
8072 register_fair_sched_group(tg, i);
8073 spin_unlock_irqrestore(&task_group_lock, flags);
8074 done:
8075 mutex_unlock(&shares_mutex);
8076 return 0;
8077 }
8078
8079 unsigned long sched_group_shares(struct task_group *tg)
8080 {
8081 return tg->shares;
8082 }
8083 #endif
8084
8085 #ifdef CONFIG_RT_GROUP_SCHED
8086 /*
8087 * Ensure that the real-time constraints are schedulable.
8088 */
8089 static DEFINE_MUTEX(rt_constraints_mutex);
8090
8091 static unsigned long to_ratio(u64 period, u64 runtime)
8092 {
8093 if (runtime == RUNTIME_INF)
8094 return 1ULL << 16;
8095
8096 return div64_64(runtime << 16, period);
8097 }
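
/*
 * to_ratio() expresses runtime/period as a fixed-point fraction where
 * 1 << 16 means 100% of the period.  Worked example with the default
 * global bandwidth (1s period, 0.95s runtime, both in nanoseconds):
 *
 *	to_ratio(1000000000, 950000000)
 *		= (950000000 << 16) / 1000000000
 *		= 62259			(~ 0.95 * 65536)
 */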
8098
8099 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8100 {
8101 struct task_group *tgi;
8102 unsigned long total = 0;
8103 unsigned long global_ratio =
8104 to_ratio(global_rt_period(), global_rt_runtime());
8105
8106 rcu_read_lock();
8107 list_for_each_entry_rcu(tgi, &task_groups, list) {
8108 if (tgi == tg)
8109 continue;
8110
8111 total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
8112 tgi->rt_bandwidth.rt_runtime);
8113 }
8114 rcu_read_unlock();
8115
8116 return total + to_ratio(period, runtime) < global_ratio;
8117 }
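
/*
 * Illustrative admission check (figures only, the real comparison uses
 * the fixed point of to_ratio()): with the default global ratio of ~95%,
 * two existing groups reserving 30% each account for 0.60, so a third
 * group asking for 30% still fits (0.90 < 0.95) while one asking for
 * 40% is rejected (1.00 >= 0.95).
 */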
8118
8119 /* Must be called with tasklist_lock held */
8120 static inline int tg_has_rt_tasks(struct task_group *tg)
8121 {
8122 struct task_struct *g, *p;
8123 do_each_thread(g, p) {
8124 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8125 return 1;
8126 } while_each_thread(g, p);
8127 return 0;
8128 }
8129
8130 static int tg_set_bandwidth(struct task_group *tg,
8131 u64 rt_period, u64 rt_runtime)
8132 {
8133 int i, err = 0;
8134
8135 mutex_lock(&rt_constraints_mutex);
8136 read_lock(&tasklist_lock);
8137 if (rt_runtime == 0 && tg_has_rt_tasks(tg)) {
8138 err = -EBUSY;
8139 goto unlock;
8140 }
8141 if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
8142 err = -EINVAL;
8143 goto unlock;
8144 }
8145
8146 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8147 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8148 tg->rt_bandwidth.rt_runtime = rt_runtime;
8149
8150 for_each_possible_cpu(i) {
8151 struct rt_rq *rt_rq = tg->rt_rq[i];
8152
8153 spin_lock(&rt_rq->rt_runtime_lock);
8154 rt_rq->rt_runtime = rt_runtime;
8155 spin_unlock(&rt_rq->rt_runtime_lock);
8156 }
8157 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8158 unlock:
8159 read_unlock(&tasklist_lock);
8160 mutex_unlock(&rt_constraints_mutex);
8161
8162 return err;
8163 }
8164
8165 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8166 {
8167 u64 rt_runtime, rt_period;
8168
8169 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8170 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8171 if (rt_runtime_us < 0)
8172 rt_runtime = RUNTIME_INF;
8173
8174 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8175 }
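
/*
 * Example of the unit handling above: the user-visible interface is in
 * microseconds while rt_bandwidth stores nanoseconds, so a value of
 * 950000 (us) becomes rt_runtime = 950000 * 1000 = 950000000 ns, and any
 * negative value maps to RUNTIME_INF, i.e. no runtime limit.
 */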
8176
8177 long sched_group_rt_runtime(struct task_group *tg)
8178 {
8179 u64 rt_runtime_us;
8180
8181 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
8182 return -1;
8183
8184 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
8185 do_div(rt_runtime_us, NSEC_PER_USEC);
8186 return rt_runtime_us;
8187 }
8188
8189 int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8190 {
8191 u64 rt_runtime, rt_period;
8192
8193 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8194 rt_runtime = tg->rt_bandwidth.rt_runtime;
8195
8196 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8197 }
8198
8199 long sched_group_rt_period(struct task_group *tg)
8200 {
8201 u64 rt_period_us;
8202
8203 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8204 do_div(rt_period_us, NSEC_PER_USEC);
8205 return rt_period_us;
8206 }
8207
8208 static int sched_rt_global_constraints(void)
8209 {
8210 int ret = 0;
8211
8212 mutex_lock(&rt_constraints_mutex);
8213 if (!__rt_schedulable(NULL, 1, 0))
8214 ret = -EINVAL;
8215 mutex_unlock(&rt_constraints_mutex);
8216
8217 return ret;
8218 }
8219 #else
8220 static int sched_rt_global_constraints(void)
8221 {
8222 unsigned long flags;
8223 int i;
8224
8225 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
8226 for_each_possible_cpu(i) {
8227 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8228
8229 spin_lock(&rt_rq->rt_runtime_lock);
8230 rt_rq->rt_runtime = global_rt_runtime();
8231 spin_unlock(&rt_rq->rt_runtime_lock);
8232 }
8233 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
8234
8235 return 0;
8236 }
8237 #endif
8238
8239 int sched_rt_handler(struct ctl_table *table, int write,
8240 struct file *filp, void __user *buffer, size_t *lenp,
8241 loff_t *ppos)
8242 {
8243 int ret;
8244 int old_period, old_runtime;
8245 static DEFINE_MUTEX(mutex);
8246
8247 mutex_lock(&mutex);
8248 old_period = sysctl_sched_rt_period;
8249 old_runtime = sysctl_sched_rt_runtime;
8250
8251 ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
8252
8253 if (!ret && write) {
8254 ret = sched_rt_global_constraints();
8255 if (ret) {
8256 sysctl_sched_rt_period = old_period;
8257 sysctl_sched_rt_runtime = old_runtime;
8258 } else {
8259 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8260 def_rt_bandwidth.rt_period =
8261 ns_to_ktime(global_rt_period());
8262 }
8263 }
8264 mutex_unlock(&mutex);
8265
8266 return ret;
8267 }
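
/*
 * sched_rt_handler() backs the sched_rt_period_us and sched_rt_runtime_us
 * sysctls under /proc/sys/kernel/: after a successful write it re-checks
 * the global constraints via sched_rt_global_constraints() and rolls both
 * values back if the new settings are not acceptable; otherwise
 * def_rt_bandwidth is updated.  E.g. writing 900000 to
 * sched_rt_runtime_us limits RT tasks to 0.9s out of the (default) 1s
 * period.
 */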
8268
8269 #ifdef CONFIG_CGROUP_SCHED
8270
8271 /* return corresponding task_group object of a cgroup */
8272 static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
8273 {
8274 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
8275 struct task_group, css);
8276 }
8277
8278 static struct cgroup_subsys_state *
8279 cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
8280 {
8281 struct task_group *tg;
8282
8283 if (!cgrp->parent) {
8284 /* This is early initialization for the top cgroup */
8285 init_task_group.css.cgroup = cgrp;
8286 return &init_task_group.css;
8287 }
8288
8289 /* we only support a 1-level deep hierarchical scheduler at the moment */
8290 if (cgrp->parent->parent)
8291 return ERR_PTR(-EINVAL);
8292
8293 tg = sched_create_group();
8294 if (IS_ERR(tg))
8295 return ERR_PTR(-ENOMEM);
8296
8297 /* Bind the cgroup to the task_group object we just created */
8298 tg->css.cgroup = cgrp;
8299
8300 return &tg->css;
8301 }
8302
8303 static void
8304 cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
8305 {
8306 struct task_group *tg = cgroup_tg(cgrp);
8307
8308 sched_destroy_group(tg);
8309 }
8310
8311 static int
8312 cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
8313 struct task_struct *tsk)
8314 {
8315 #ifdef CONFIG_RT_GROUP_SCHED
8316 /* Don't accept realtime tasks when there is no way for them to run */
8317 if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
8318 return -EINVAL;
8319 #else
8320 /* We don't support RT-tasks being in separate groups */
8321 if (tsk->sched_class != &fair_sched_class)
8322 return -EINVAL;
8323 #endif
8324
8325 return 0;
8326 }
8327
8328 static void
8329 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
8330 struct cgroup *old_cont, struct task_struct *tsk)
8331 {
8332 sched_move_task(tsk);
8333 }
8334
8335 #ifdef CONFIG_FAIR_GROUP_SCHED
8336 static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
8337 u64 shareval)
8338 {
8339 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
8340 }
8341
8342 static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
8343 {
8344 struct task_group *tg = cgroup_tg(cgrp);
8345
8346 return (u64) tg->shares;
8347 }
8348 #endif
8349
8350 #ifdef CONFIG_RT_GROUP_SCHED
8351 static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
8352 struct file *file,
8353 const char __user *userbuf,
8354 size_t nbytes, loff_t *unused_ppos)
8355 {
8356 char buffer[64];
8357 int retval = 0;
8358 s64 val;
8359 char *end;
8360
8361 if (!nbytes)
8362 return -EINVAL;
8363 if (nbytes >= sizeof(buffer))
8364 return -E2BIG;
8365 if (copy_from_user(buffer, userbuf, nbytes))
8366 return -EFAULT;
8367
8368 buffer[nbytes] = 0; /* nul-terminate */
8369
8370 /* strip newline if necessary */
8371 if (nbytes && (buffer[nbytes-1] == '\n'))
8372 buffer[nbytes-1] = 0;
8373 val = simple_strtoll(buffer, &end, 0);
8374 if (*end)
8375 return -EINVAL;
8376
8377 /* Pass to subsystem */
8378 retval = sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
8379 if (!retval)
8380 retval = nbytes;
8381 return retval;
8382 }
8383
8384 static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft,
8385 struct file *file,
8386 char __user *buf, size_t nbytes,
8387 loff_t *ppos)
8388 {
8389 char tmp[64];
8390 long val = sched_group_rt_runtime(cgroup_tg(cgrp));
8391 int len = sprintf(tmp, "%ld\n", val);
8392
8393 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
8394 }
8395
8396 static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
8397 u64 rt_period_us)
8398 {
8399 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
8400 }
8401
8402 static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
8403 {
8404 return sched_group_rt_period(cgroup_tg(cgrp));
8405 }
8406 #endif
8407
8408 static struct cftype cpu_files[] = {
8409 #ifdef CONFIG_FAIR_GROUP_SCHED
8410 {
8411 .name = "shares",
8412 .read_uint = cpu_shares_read_uint,
8413 .write_uint = cpu_shares_write_uint,
8414 },
8415 #endif
8416 #ifdef CONFIG_RT_GROUP_SCHED
8417 {
8418 .name = "rt_runtime_us",
8419 .read = cpu_rt_runtime_read,
8420 .write = cpu_rt_runtime_write,
8421 },
8422 {
8423 .name = "rt_period_us",
8424 .read_uint = cpu_rt_period_read_uint,
8425 .write_uint = cpu_rt_period_write_uint,
8426 },
8427 #endif
8428 };
8429
8430 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
8431 {
8432 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
8433 }
8434
8435 struct cgroup_subsys cpu_cgroup_subsys = {
8436 .name = "cpu",
8437 .create = cpu_cgroup_create,
8438 .destroy = cpu_cgroup_destroy,
8439 .can_attach = cpu_cgroup_can_attach,
8440 .attach = cpu_cgroup_attach,
8441 .populate = cpu_cgroup_populate,
8442 .subsys_id = cpu_cgroup_subsys_id,
8443 .early_init = 1,
8444 };
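
/*
 * Together with the cftype table above, every directory of the "cpu"
 * cgroup controller gets files named <subsys>.<name>: cpu.shares (with
 * CONFIG_FAIR_GROUP_SCHED) and cpu.rt_runtime_us / cpu.rt_period_us
 * (with CONFIG_RT_GROUP_SCHED).  Creating a directory ends up in
 * cpu_cgroup_create(), a write to cpu.shares in sched_group_set_shares()
 * via cpu_shares_write_uint(), and moving a pid into the group's tasks
 * file in cpu_cgroup_attach() -> sched_move_task().
 */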
8445
8446 #endif /* CONFIG_CGROUP_SCHED */
8447
8448 #ifdef CONFIG_CGROUP_CPUACCT
8449
8450 /*
8451 * CPU accounting code for task groups.
8452 *
8453 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
8454 * (balbir@in.ibm.com).
8455 */
8456
8457 /* track cpu usage of a group of tasks */
8458 struct cpuacct {
8459 struct cgroup_subsys_state css;
8460 /* cpuusage holds a pointer to a u64-type object on every cpu */
8461 u64 *cpuusage;
8462 };
8463
8464 struct cgroup_subsys cpuacct_subsys;
8465
8466 /* return cpu accounting group corresponding to this container */
8467 static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
8468 {
8469 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
8470 struct cpuacct, css);
8471 }
8472
8473 /* return cpu accounting group to which this task belongs */
8474 static inline struct cpuacct *task_ca(struct task_struct *tsk)
8475 {
8476 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
8477 struct cpuacct, css);
8478 }
8479
8480 /* create a new cpu accounting group */
8481 static struct cgroup_subsys_state *cpuacct_create(
8482 struct cgroup_subsys *ss, struct cgroup *cgrp)
8483 {
8484 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
8485
8486 if (!ca)
8487 return ERR_PTR(-ENOMEM);
8488
8489 ca->cpuusage = alloc_percpu(u64);
8490 if (!ca->cpuusage) {
8491 kfree(ca);
8492 return ERR_PTR(-ENOMEM);
8493 }
8494
8495 return &ca->css;
8496 }
8497
8498 /* destroy an existing cpu accounting group */
8499 static void
8500 cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
8501 {
8502 struct cpuacct *ca = cgroup_ca(cgrp);
8503
8504 free_percpu(ca->cpuusage);
8505 kfree(ca);
8506 }
8507
8508 /* return total cpu usage (in nanoseconds) of a group */
8509 static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
8510 {
8511 struct cpuacct *ca = cgroup_ca(cgrp);
8512 u64 totalcpuusage = 0;
8513 int i;
8514
8515 for_each_possible_cpu(i) {
8516 u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
8517
8518 /*
8519 * Take rq->lock to make 64-bit addition safe on 32-bit
8520 * platforms.
8521 */
8522 spin_lock_irq(&cpu_rq(i)->lock);
8523 totalcpuusage += *cpuusage;
8524 spin_unlock_irq(&cpu_rq(i)->lock);
8525 }
8526
8527 return totalcpuusage;
8528 }
8529
8530 static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
8531 u64 reset)
8532 {
8533 struct cpuacct *ca = cgroup_ca(cgrp);
8534 int err = 0;
8535 int i;
8536
8537 if (reset) {
8538 err = -EINVAL;
8539 goto out;
8540 }
8541
8542 for_each_possible_cpu(i) {
8543 u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
8544
8545 spin_lock_irq(&cpu_rq(i)->lock);
8546 *cpuusage = 0;
8547 spin_unlock_irq(&cpu_rq(i)->lock);
8548 }
8549 out:
8550 return err;
8551 }
8552
8553 static struct cftype files[] = {
8554 {
8555 .name = "usage",
8556 .read_uint = cpuusage_read,
8557 .write_uint = cpuusage_write,
8558 },
8559 };
8560
8561 static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
8562 {
8563 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
8564 }
8565
8566 /*
8567 * charge this task's execution time to its accounting group.
8568 *
8569 * called with rq->lock held.
8570 */
8571 static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
8572 {
8573 struct cpuacct *ca;
8574
8575 if (!cpuacct_subsys.active)
8576 return;
8577
8578 ca = task_ca(tsk);
8579 if (ca) {
8580 u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
8581
8582 *cpuusage += cputime;
8583 }
8584 }
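
/*
 * Note that cpuacct_charge() only bumps the counter of the task's CPU;
 * the group-wide total is only computed when cpuusage_read() sums the
 * per-cpu values for the cpuacct.usage cgroup file, and writing 0 to
 * that file clears every per-cpu counter via cpuusage_write().
 */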
8585
8586 struct cgroup_subsys cpuacct_subsys = {
8587 .name = "cpuacct",
8588 .create = cpuacct_create,
8589 .destroy = cpuacct_destroy,
8590 .populate = cpuacct_populate,
8591 .subsys_id = cpuacct_subsys_id,
8592 };
8593 #endif /* CONFIG_CGROUP_CPUACCT */