1 /*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
27 */
28
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/nmi.h>
32 #include <linux/init.h>
33 #include <linux/uaccess.h>
34 #include <linux/highmem.h>
35 #include <asm/mmu_context.h>
36 #include <linux/interrupt.h>
37 #include <linux/capability.h>
38 #include <linux/completion.h>
39 #include <linux/kernel_stat.h>
40 #include <linux/debug_locks.h>
41 #include <linux/perf_event.h>
42 #include <linux/security.h>
43 #include <linux/notifier.h>
44 #include <linux/profile.h>
45 #include <linux/freezer.h>
46 #include <linux/vmalloc.h>
47 #include <linux/blkdev.h>
48 #include <linux/delay.h>
49 #include <linux/pid_namespace.h>
50 #include <linux/smp.h>
51 #include <linux/threads.h>
52 #include <linux/timer.h>
53 #include <linux/rcupdate.h>
54 #include <linux/cpu.h>
55 #include <linux/cpuset.h>
56 #include <linux/percpu.h>
57 #include <linux/proc_fs.h>
58 #include <linux/seq_file.h>
59 #include <linux/stop_machine.h>
60 #include <linux/sysctl.h>
61 #include <linux/syscalls.h>
62 #include <linux/times.h>
63 #include <linux/tsacct_kern.h>
64 #include <linux/kprobes.h>
65 #include <linux/delayacct.h>
66 #include <linux/unistd.h>
67 #include <linux/pagemap.h>
68 #include <linux/hrtimer.h>
69 #include <linux/tick.h>
70 #include <linux/debugfs.h>
71 #include <linux/ctype.h>
72 #include <linux/ftrace.h>
73 #include <linux/slab.h>
74
75 #include <asm/tlb.h>
76 #include <asm/irq_regs.h>
77 #include <asm/mutex.h>
78 #ifdef CONFIG_PARAVIRT
79 #include <asm/paravirt.h>
80 #endif
81
82 #include "sched_cpupri.h"
83 #include "workqueue_sched.h"
84 #include "sched_autogroup.h"
85
86 #define CREATE_TRACE_POINTS
87 #include <trace/events/sched.h>
88
89 /*
90 * Convert user-nice values [ -20 ... 0 ... 19 ]
91 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
92 * and back.
93 */
94 #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
95 #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
96 #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
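/*
 * Illustration (a sketch, assuming MAX_RT_PRIO == 100 and MAX_PRIO == 140,
 * their usual values): the macros above map the nice range onto the tail
 * of the priority scale, e.g.
 *
 *	NICE_TO_PRIO(-20) == 100	PRIO_TO_NICE(100) == -20
 *	NICE_TO_PRIO(  0) == 120	PRIO_TO_NICE(120) ==   0
 *	NICE_TO_PRIO( 19) == 139	PRIO_TO_NICE(139) ==  19
 */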
97
98 /*
99 * 'User priority' is the nice value converted to something we
100 * can work with better when scaling various scheduler parameters;
101 * it's a [ 0 ... 39 ] range.
102 */
103 #define USER_PRIO(p) ((p)-MAX_RT_PRIO)
104 #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
105 #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
106
107 /*
108 * Helpers for converting nanosecond timing to jiffy resolution
109 */
110 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
111
112 #define NICE_0_LOAD SCHED_LOAD_SCALE
113 #define NICE_0_SHIFT SCHED_LOAD_SHIFT
114
115 /*
116 * These are the 'tuning knobs' of the scheduler:
117 *
118 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
119 * Timeslices get refilled after they expire.
120 */
121 #define DEF_TIMESLICE (100 * HZ / 1000)
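/*
 * Worked example (for the common HZ values): DEF_TIMESLICE is expressed
 * in jiffies but always corresponds to 100 msecs:
 *
 *	HZ == 1000  ->  100 * 1000 / 1000 == 100 jiffies
 *	HZ ==  250  ->  100 *  250 / 1000 ==  25 jiffies
 *	HZ ==  100  ->  100 *  100 / 1000 ==  10 jiffies
 */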
122
123 /*
124 * Single value that denotes runtime == period, i.e. unlimited time.
125 */
126 #define RUNTIME_INF ((u64)~0ULL)
127
128 static inline int rt_policy(int policy)
129 {
130 if (policy == SCHED_FIFO || policy == SCHED_RR)
131 return 1;
132 return 0;
133 }
134
135 static inline int task_has_rt_policy(struct task_struct *p)
136 {
137 return rt_policy(p->policy);
138 }
139
140 /*
141 * This is the priority-queue data structure of the RT scheduling class:
142 */
143 struct rt_prio_array {
144 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
145 struct list_head queue[MAX_RT_PRIO];
146 };
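/*
 * How this is used (a sketch, not part of the original comment): the RT
 * class keeps one FIFO list per RT priority level and sets the matching
 * bit in 'bitmap' whenever that list is non-empty. Picking the next RT
 * task is then a find-first-set-bit over 'bitmap'; the extra delimiter
 * bit (index MAX_RT_PRIO) stays permanently set so the bit search always
 * terminates, even when no RT task is queued.
 */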
147
148 struct rt_bandwidth {
149 /* nests inside the rq lock: */
150 raw_spinlock_t rt_runtime_lock;
151 ktime_t rt_period;
152 u64 rt_runtime;
153 struct hrtimer rt_period_timer;
154 };
155
156 static struct rt_bandwidth def_rt_bandwidth;
157
158 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
159
160 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
161 {
162 struct rt_bandwidth *rt_b =
163 container_of(timer, struct rt_bandwidth, rt_period_timer);
164 ktime_t now;
165 int overrun;
166 int idle = 0;
167
168 for (;;) {
169 now = hrtimer_cb_get_time(timer);
170 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
171
172 if (!overrun)
173 break;
174
175 idle = do_sched_rt_period_timer(rt_b, overrun);
176 }
177
178 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
179 }
180
181 static
182 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
183 {
184 rt_b->rt_period = ns_to_ktime(period);
185 rt_b->rt_runtime = runtime;
186
187 raw_spin_lock_init(&rt_b->rt_runtime_lock);
188
189 hrtimer_init(&rt_b->rt_period_timer,
190 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
191 rt_b->rt_period_timer.function = sched_rt_period_timer;
192 }
193
194 static inline int rt_bandwidth_enabled(void)
195 {
196 return sysctl_sched_rt_runtime >= 0;
197 }
198
199 static void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
200 {
201 unsigned long delta;
202 ktime_t soft, hard, now;
203
204 for (;;) {
205 if (hrtimer_active(period_timer))
206 break;
207
208 now = hrtimer_cb_get_time(period_timer);
209 hrtimer_forward(period_timer, now, period);
210
211 soft = hrtimer_get_softexpires(period_timer);
212 hard = hrtimer_get_expires(period_timer);
213 delta = ktime_to_ns(ktime_sub(hard, soft));
214 __hrtimer_start_range_ns(period_timer, soft, delta,
215 HRTIMER_MODE_ABS_PINNED, 0);
216 }
217 }
218
219 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
220 {
221 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
222 return;
223
224 if (hrtimer_active(&rt_b->rt_period_timer))
225 return;
226
227 raw_spin_lock(&rt_b->rt_runtime_lock);
228 start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
229 raw_spin_unlock(&rt_b->rt_runtime_lock);
230 }
231
232 #ifdef CONFIG_RT_GROUP_SCHED
233 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
234 {
235 hrtimer_cancel(&rt_b->rt_period_timer);
236 }
237 #endif
238
239 /*
240 * sched_domains_mutex serializes calls to init_sched_domains,
241 * detach_destroy_domains and partition_sched_domains.
242 */
243 static DEFINE_MUTEX(sched_domains_mutex);
244
245 #ifdef CONFIG_CGROUP_SCHED
246
247 #include <linux/cgroup.h>
248
249 struct cfs_rq;
250
251 static LIST_HEAD(task_groups);
252
253 struct cfs_bandwidth {
254 #ifdef CONFIG_CFS_BANDWIDTH
255 raw_spinlock_t lock;
256 ktime_t period;
257 u64 quota, runtime;
258 s64 hierarchal_quota;
259 u64 runtime_expires;
260
261 int idle, timer_active;
262 struct hrtimer period_timer, slack_timer;
263 struct list_head throttled_cfs_rq;
264
265 /* statistics */
266 int nr_periods, nr_throttled;
267 u64 throttled_time;
268 #endif
269 };
270
271 /* task group related information */
272 struct task_group {
273 struct cgroup_subsys_state css;
274
275 #ifdef CONFIG_FAIR_GROUP_SCHED
276 /* schedulable entities of this group on each cpu */
277 struct sched_entity **se;
278 /* runqueue "owned" by this group on each cpu */
279 struct cfs_rq **cfs_rq;
280 unsigned long shares;
281
282 atomic_t load_weight;
283 #endif
284
285 #ifdef CONFIG_RT_GROUP_SCHED
286 struct sched_rt_entity **rt_se;
287 struct rt_rq **rt_rq;
288
289 struct rt_bandwidth rt_bandwidth;
290 #endif
291
292 struct rcu_head rcu;
293 struct list_head list;
294
295 struct task_group *parent;
296 struct list_head siblings;
297 struct list_head children;
298
299 #ifdef CONFIG_SCHED_AUTOGROUP
300 struct autogroup *autogroup;
301 #endif
302
303 struct cfs_bandwidth cfs_bandwidth;
304 };
305
306 /* task_group_lock serializes the addition/removal of task groups */
307 static DEFINE_SPINLOCK(task_group_lock);
308
309 #ifdef CONFIG_FAIR_GROUP_SCHED
310
311 # define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
312
313 /*
314 * A weight of 0 or 1 can cause arithmetic problems.
315 * The weight of a cfs_rq is the sum of the weights of the entities
316 * queued on that cfs_rq, so the weight of an entity should not be
317 * too large, and neither should the shares value of a task group.
318 * (The default weight is 1024 - so there's no practical
319 * limitation from this.)
320 */
321 #define MIN_SHARES (1UL << 1)
322 #define MAX_SHARES (1UL << 18)
323
324 static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
325 #endif
326
327 /* Default task group.
328 * Every task in the system belongs to this group at bootup.
329 */
330 struct task_group root_task_group;
331
332 #endif /* CONFIG_CGROUP_SCHED */
333
334 /* CFS-related fields in a runqueue */
335 struct cfs_rq {
336 struct load_weight load;
337 unsigned long nr_running, h_nr_running;
338
339 u64 exec_clock;
340 u64 min_vruntime;
341 #ifndef CONFIG_64BIT
342 u64 min_vruntime_copy;
343 #endif
344
345 struct rb_root tasks_timeline;
346 struct rb_node *rb_leftmost;
347
348 struct list_head tasks;
349 struct list_head *balance_iterator;
350
351 /*
352 * 'curr' points to the currently running entity on this cfs_rq.
353 * It is set to NULL otherwise (i.e. when none are currently running).
354 */
355 struct sched_entity *curr, *next, *last, *skip;
356
357 #ifdef CONFIG_SCHED_DEBUG
358 unsigned int nr_spread_over;
359 #endif
360
361 #ifdef CONFIG_FAIR_GROUP_SCHED
362 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
363
364 /*
365 * leaf cfs_rqs are those that hold tasks (the lowest schedulable entities in
366 * a hierarchy). Non-leaf cfs_rqs hold other, higher schedulable entities
367 * (like users, containers etc.)
368 *
369 * leaf_cfs_rq_list ties together the list of leaf cfs_rqs on a cpu. This
370 * list is used during load balancing.
371 */
372 int on_list;
373 struct list_head leaf_cfs_rq_list;
374 struct task_group *tg; /* group that "owns" this runqueue */
375
376 #ifdef CONFIG_SMP
377 /*
378 * the part of load.weight contributed by tasks
379 */
380 unsigned long task_weight;
381
382 /*
383 * h_load = weight * f(tg)
384 *
385 * Where f(tg) is the recursive weight fraction assigned to
386 * this group.
387 */
388 unsigned long h_load;
389
390 /*
391 * Maintaining per-cpu shares distribution for group scheduling
392 *
393 * load_stamp is the last time we updated the load average
394 * load_last is the last time we updated the load average and saw load
395 * load_unacc_exec_time is currently unaccounted execution time
396 */
397 u64 load_avg;
398 u64 load_period;
399 u64 load_stamp, load_last, load_unacc_exec_time;
400
401 unsigned long load_contribution;
402 #endif
403 #ifdef CONFIG_CFS_BANDWIDTH
404 int runtime_enabled;
405 u64 runtime_expires;
406 s64 runtime_remaining;
407
408 u64 throttled_timestamp;
409 int throttled, throttle_count;
410 struct list_head throttled_list;
411 #endif
412 #endif
413 };
414
415 #ifdef CONFIG_FAIR_GROUP_SCHED
416 #ifdef CONFIG_CFS_BANDWIDTH
417 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
418 {
419 return &tg->cfs_bandwidth;
420 }
421
422 static inline u64 default_cfs_period(void);
423 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
424 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
425
426 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
427 {
428 struct cfs_bandwidth *cfs_b =
429 container_of(timer, struct cfs_bandwidth, slack_timer);
430 do_sched_cfs_slack_timer(cfs_b);
431
432 return HRTIMER_NORESTART;
433 }
434
435 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
436 {
437 struct cfs_bandwidth *cfs_b =
438 container_of(timer, struct cfs_bandwidth, period_timer);
439 ktime_t now;
440 int overrun;
441 int idle = 0;
442
443 for (;;) {
444 now = hrtimer_cb_get_time(timer);
445 overrun = hrtimer_forward(timer, now, cfs_b->period);
446
447 if (!overrun)
448 break;
449
450 idle = do_sched_cfs_period_timer(cfs_b, overrun);
451 }
452
453 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
454 }
455
456 static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
457 {
458 raw_spin_lock_init(&cfs_b->lock);
459 cfs_b->runtime = 0;
460 cfs_b->quota = RUNTIME_INF;
461 cfs_b->period = ns_to_ktime(default_cfs_period());
462
463 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
464 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
465 cfs_b->period_timer.function = sched_cfs_period_timer;
466 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
467 cfs_b->slack_timer.function = sched_cfs_slack_timer;
468 }
469
470 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
471 {
472 cfs_rq->runtime_enabled = 0;
473 INIT_LIST_HEAD(&cfs_rq->throttled_list);
474 }
475
476 /* requires cfs_b->lock, may release to reprogram timer */
477 static void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
478 {
479 /*
480 * The timer may be active because we're trying to set a new bandwidth
481 * period or because we're racing with the tear-down path
482 * (timer_active==0 becomes visible before the hrtimer call-back
483 * terminates). In either case we ensure that it's re-programmed.
484 */
485 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
486 raw_spin_unlock(&cfs_b->lock);
487 /* ensure cfs_b->lock is available while we wait */
488 hrtimer_cancel(&cfs_b->period_timer);
489
490 raw_spin_lock(&cfs_b->lock);
491 /* if someone else restarted the timer then we're done */
492 if (cfs_b->timer_active)
493 return;
494 }
495
496 cfs_b->timer_active = 1;
497 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
498 }
499
500 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
501 {
502 hrtimer_cancel(&cfs_b->period_timer);
503 hrtimer_cancel(&cfs_b->slack_timer);
504 }
505 #else
506 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
507 static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
508 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
509
510 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
511 {
512 return NULL;
513 }
514 #endif /* CONFIG_CFS_BANDWIDTH */
515 #endif /* CONFIG_FAIR_GROUP_SCHED */
516
517 /* Real-Time classes' related field in a runqueue: */
518 struct rt_rq {
519 struct rt_prio_array active;
520 unsigned long rt_nr_running;
521 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
522 struct {
523 int curr; /* highest queued rt task prio */
524 #ifdef CONFIG_SMP
525 int next; /* next highest */
526 #endif
527 } highest_prio;
528 #endif
529 #ifdef CONFIG_SMP
530 unsigned long rt_nr_migratory;
531 unsigned long rt_nr_total;
532 int overloaded;
533 struct plist_head pushable_tasks;
534 #endif
535 int rt_throttled;
536 u64 rt_time;
537 u64 rt_runtime;
538 /* Nests inside the rq lock: */
539 raw_spinlock_t rt_runtime_lock;
540
541 #ifdef CONFIG_RT_GROUP_SCHED
542 unsigned long rt_nr_boosted;
543
544 struct rq *rq;
545 struct list_head leaf_rt_rq_list;
546 struct task_group *tg;
547 #endif
548 };
549
550 #ifdef CONFIG_SMP
551
552 /*
553 * We add the notion of a root-domain which will be used to define per-domain
554 * variables. Each exclusive cpuset essentially defines an island domain by
555 * fully partitioning the member cpus from any other cpuset. Whenever a new
556 * exclusive cpuset is created, we also create and attach a new root-domain
557 * object.
558 *
559 */
560 struct root_domain {
561 atomic_t refcount;
562 atomic_t rto_count;
563 struct rcu_head rcu;
564 cpumask_var_t span;
565 cpumask_var_t online;
566
567 /*
568 * The "RT overload" flag: it gets set if a CPU has more than
569 * one runnable RT task.
570 */
571 cpumask_var_t rto_mask;
572 struct cpupri cpupri;
573 };
574
575 /*
576 * By default the system creates a single root-domain with all cpus as
577 * members (mimicking the global state we have today).
578 */
579 static struct root_domain def_root_domain;
580
581 #endif /* CONFIG_SMP */
582
583 /*
584 * This is the main, per-CPU runqueue data structure.
585 *
586 * Locking rule: code that needs to lock multiple runqueues (such as
587 * the load balancing or the thread migration code) must acquire the
588 * rq locks in ascending &runqueue (address) order.
589 */
590 struct rq {
591 /* runqueue lock: */
592 raw_spinlock_t lock;
593
594 /*
595 * nr_running and cpu_load should be in the same cacheline because
596 * remote CPUs use both these fields when doing load calculation.
597 */
598 unsigned long nr_running;
599 #define CPU_LOAD_IDX_MAX 5
600 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
601 unsigned long last_load_update_tick;
602 #ifdef CONFIG_NO_HZ
603 u64 nohz_stamp;
604 unsigned char nohz_balance_kick;
605 #endif
606 int skip_clock_update;
607
608 /* capture load from *all* tasks on this cpu: */
609 struct load_weight load;
610 unsigned long nr_load_updates;
611 u64 nr_switches;
612
613 struct cfs_rq cfs;
614 struct rt_rq rt;
615
616 #ifdef CONFIG_FAIR_GROUP_SCHED
617 /* list of leaf cfs_rq on this cpu: */
618 struct list_head leaf_cfs_rq_list;
619 #endif
620 #ifdef CONFIG_RT_GROUP_SCHED
621 struct list_head leaf_rt_rq_list;
622 #endif
623
624 /*
625 * This is part of a global counter where only the total sum
626 * over all CPUs matters. A task can increase this counter on
627 * one CPU and if it got migrated afterwards it may decrease
628 * it on another CPU. Always updated under the runqueue lock:
629 */
630 unsigned long nr_uninterruptible;
631
632 struct task_struct *curr, *idle, *stop;
633 unsigned long next_balance;
634 struct mm_struct *prev_mm;
635
636 u64 clock;
637 u64 clock_task;
638
639 atomic_t nr_iowait;
640
641 #ifdef CONFIG_SMP
642 struct root_domain *rd;
643 struct sched_domain *sd;
644
645 unsigned long cpu_power;
646
647 unsigned char idle_balance;
648 /* For active balancing */
649 int post_schedule;
650 int active_balance;
651 int push_cpu;
652 struct cpu_stop_work active_balance_work;
653 /* cpu of this runqueue: */
654 int cpu;
655 int online;
656
657 u64 rt_avg;
658 u64 age_stamp;
659 u64 idle_stamp;
660 u64 avg_idle;
661 #endif
662
663 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
664 u64 prev_irq_time;
665 #endif
666 #ifdef CONFIG_PARAVIRT
667 u64 prev_steal_time;
668 #endif
669 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
670 u64 prev_steal_time_rq;
671 #endif
672
673 /* calc_load related fields */
674 unsigned long calc_load_update;
675 long calc_load_active;
676
677 #ifdef CONFIG_SCHED_HRTICK
678 #ifdef CONFIG_SMP
679 int hrtick_csd_pending;
680 struct call_single_data hrtick_csd;
681 #endif
682 struct hrtimer hrtick_timer;
683 #endif
684
685 #ifdef CONFIG_SCHEDSTATS
686 /* latency stats */
687 struct sched_info rq_sched_info;
688 unsigned long long rq_cpu_time;
689 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
690
691 /* sys_sched_yield() stats */
692 unsigned int yld_count;
693
694 /* schedule() stats */
695 unsigned int sched_switch;
696 unsigned int sched_count;
697 unsigned int sched_goidle;
698
699 /* try_to_wake_up() stats */
700 unsigned int ttwu_count;
701 unsigned int ttwu_local;
702 #endif
703
704 #ifdef CONFIG_SMP
705 struct llist_head wake_list;
706 #endif
707 };
708
709 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
710
711
712 static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
713
714 static inline int cpu_of(struct rq *rq)
715 {
716 #ifdef CONFIG_SMP
717 return rq->cpu;
718 #else
719 return 0;
720 #endif
721 }
722
723 #define rcu_dereference_check_sched_domain(p) \
724 rcu_dereference_check((p), \
725 lockdep_is_held(&sched_domains_mutex))
726
727 /*
728 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
729 * See detach_destroy_domains: synchronize_sched for details.
730 *
731 * The domain tree of any CPU may only be accessed from within
732 * preempt-disabled sections.
733 */
734 #define for_each_domain(cpu, __sd) \
735 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
736
737 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
738 #define this_rq() (&__get_cpu_var(runqueues))
739 #define task_rq(p) cpu_rq(task_cpu(p))
740 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
741 #define raw_rq() (&__raw_get_cpu_var(runqueues))
742
743 #ifdef CONFIG_CGROUP_SCHED
744
745 /*
746 * Return the group to which this task belongs.
747 *
748 * We use task_subsys_state_check() and extend the RCU verification with
749 * p->pi_lock and rq->lock because cpu_cgroup_attach() holds those locks for each
750 * task it moves into the cgroup. Therefore by holding either of those locks,
751 * we pin the task to the current cgroup.
752 */
753 static inline struct task_group *task_group(struct task_struct *p)
754 {
755 struct task_group *tg;
756 struct cgroup_subsys_state *css;
757
758 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
759 lockdep_is_held(&p->pi_lock) ||
760 lockdep_is_held(&task_rq(p)->lock));
761 tg = container_of(css, struct task_group, css);
762
763 return autogroup_task_group(p, tg);
764 }
765
766 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
767 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
768 {
769 #ifdef CONFIG_FAIR_GROUP_SCHED
770 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
771 p->se.parent = task_group(p)->se[cpu];
772 #endif
773
774 #ifdef CONFIG_RT_GROUP_SCHED
775 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
776 p->rt.parent = task_group(p)->rt_se[cpu];
777 #endif
778 }
779
780 #else /* CONFIG_CGROUP_SCHED */
781
782 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
783 static inline struct task_group *task_group(struct task_struct *p)
784 {
785 return NULL;
786 }
787
788 #endif /* CONFIG_CGROUP_SCHED */
789
790 static void update_rq_clock_task(struct rq *rq, s64 delta);
791
792 static void update_rq_clock(struct rq *rq)
793 {
794 s64 delta;
795
796 if (rq->skip_clock_update > 0)
797 return;
798
799 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
800 rq->clock += delta;
801 update_rq_clock_task(rq, delta);
802 }
803
804 /*
805 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
806 */
807 #ifdef CONFIG_SCHED_DEBUG
808 # define const_debug __read_mostly
809 #else
810 # define const_debug static const
811 #endif
812
813 /**
814 * runqueue_is_locked - Returns true if the current cpu runqueue is locked
815 * @cpu: the processor in question.
816 *
817 * This interface allows printk to be called with the runqueue lock
818 * held and know whether or not it is OK to wake up the klogd.
819 */
820 int runqueue_is_locked(int cpu)
821 {
822 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
823 }
824
825 /*
826 * Debugging: various feature bits
827 */
828
829 #define SCHED_FEAT(name, enabled) \
830 __SCHED_FEAT_##name ,
831
832 enum {
833 #include "sched_features.h"
834 };
835
836 #undef SCHED_FEAT
837
838 #define SCHED_FEAT(name, enabled) \
839 (1UL << __SCHED_FEAT_##name) * enabled |
840
841 const_debug unsigned int sysctl_sched_features =
842 #include "sched_features.h"
843 0;
844
845 #undef SCHED_FEAT
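/*
 * Illustration of the construction above (a hypothetical two-entry
 * sched_features.h; the real file has more entries):
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
 *	SCHED_FEAT(HRTICK, 0)
 *
 * expands the initializer to:
 *
 *	(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * 1 |
 *	(1UL << __SCHED_FEAT_HRTICK) * 0 |
 *	0;
 *
 * i.e. a bitmask containing exactly the features that default to enabled.
 */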
846
847 #ifdef CONFIG_SCHED_DEBUG
848 #define SCHED_FEAT(name, enabled) \
849 #name ,
850
851 static __read_mostly char *sched_feat_names[] = {
852 #include "sched_features.h"
853 NULL
854 };
855
856 #undef SCHED_FEAT
857
858 static int sched_feat_show(struct seq_file *m, void *v)
859 {
860 int i;
861
862 for (i = 0; sched_feat_names[i]; i++) {
863 if (!(sysctl_sched_features & (1UL << i)))
864 seq_puts(m, "NO_");
865 seq_printf(m, "%s ", sched_feat_names[i]);
866 }
867 seq_puts(m, "\n");
868
869 return 0;
870 }
871
872 static ssize_t
873 sched_feat_write(struct file *filp, const char __user *ubuf,
874 size_t cnt, loff_t *ppos)
875 {
876 char buf[64];
877 char *cmp;
878 int neg = 0;
879 int i;
880
881 if (cnt > 63)
882 cnt = 63;
883
884 if (copy_from_user(&buf, ubuf, cnt))
885 return -EFAULT;
886
887 buf[cnt] = 0;
888 cmp = strstrip(buf);
889
890 if (strncmp(cmp, "NO_", 3) == 0) {
891 neg = 1;
892 cmp += 3;
893 }
894
895 for (i = 0; sched_feat_names[i]; i++) {
896 if (strcmp(cmp, sched_feat_names[i]) == 0) {
897 if (neg)
898 sysctl_sched_features &= ~(1UL << i);
899 else
900 sysctl_sched_features |= (1UL << i);
901 break;
902 }
903 }
904
905 if (!sched_feat_names[i])
906 return -EINVAL;
907
908 *ppos += cnt;
909
910 return cnt;
911 }
912
913 static int sched_feat_open(struct inode *inode, struct file *filp)
914 {
915 return single_open(filp, sched_feat_show, NULL);
916 }
917
918 static const struct file_operations sched_feat_fops = {
919 .open = sched_feat_open,
920 .write = sched_feat_write,
921 .read = seq_read,
922 .llseek = seq_lseek,
923 .release = single_release,
924 };
925
926 static __init int sched_init_debug(void)
927 {
928 debugfs_create_file("sched_features", 0644, NULL, NULL,
929 &sched_feat_fops);
930
931 return 0;
932 }
933 late_initcall(sched_init_debug);
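/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat  /sys/kernel/debug/sched_features      # disabled features show as NO_<name>
 *	echo NO_HRTICK > /sys/kernel/debug/sched_features   # clear a feature bit
 *	echo HRTICK    > /sys/kernel/debug/sched_features   # set it again
 *
 * Writing an unknown name returns -EINVAL, see sched_feat_write() above.
 */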
934
935 #endif
936
937 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
938
939 /*
940 * Number of tasks to iterate in a single balance run.
941 * Limited because this is done with IRQs disabled.
942 */
943 const_debug unsigned int sysctl_sched_nr_migrate = 32;
944
945 /*
946 * period over which we average the RT time consumption, measured
947 * in ms.
948 *
949 * default: 1s
950 */
951 const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
952
953 /*
954 * period over which we measure -rt task cpu usage in us.
955 * default: 1s
956 */
957 unsigned int sysctl_sched_rt_period = 1000000;
958
959 static __read_mostly int scheduler_running;
960
961 /*
962 * part of the period that we allow rt tasks to run in us.
963 * default: 0.95s
964 */
965 int sysctl_sched_rt_runtime = 950000;
966
967 static inline u64 global_rt_period(void)
968 {
969 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
970 }
971
972 static inline u64 global_rt_runtime(void)
973 {
974 if (sysctl_sched_rt_runtime < 0)
975 return RUNTIME_INF;
976
977 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
978 }
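/*
 * Worked example with the defaults above (exposed, in this era, as the
 * sched_rt_period_us / sched_rt_runtime_us sysctls): period = 1000000 us
 * and runtime = 950000 us, so global_rt_period() returns 1e9 ns and
 * global_rt_runtime() returns 9.5e8 ns - RT tasks as a whole may consume
 * at most 950 ms of every 1 s. Setting the runtime sysctl to -1 yields
 * RUNTIME_INF, i.e. no RT throttling.
 */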
979
980 #ifndef prepare_arch_switch
981 # define prepare_arch_switch(next) do { } while (0)
982 #endif
983 #ifndef finish_arch_switch
984 # define finish_arch_switch(prev) do { } while (0)
985 #endif
986
987 static inline int task_current(struct rq *rq, struct task_struct *p)
988 {
989 return rq->curr == p;
990 }
991
992 static inline int task_running(struct rq *rq, struct task_struct *p)
993 {
994 #ifdef CONFIG_SMP
995 return p->on_cpu;
996 #else
997 return task_current(rq, p);
998 #endif
999 }
1000
1001 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
1002 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1003 {
1004 #ifdef CONFIG_SMP
1005 /*
1006 * We can optimise this out completely for !SMP, because the
1007 * SMP rebalancing from interrupt is the only thing that cares
1008 * here.
1009 */
1010 next->on_cpu = 1;
1011 #endif
1012 }
1013
1014 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1015 {
1016 #ifdef CONFIG_SMP
1017 /*
1018 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1019 * We must ensure this doesn't happen until the switch is completely
1020 * finished.
1021 */
1022 smp_wmb();
1023 prev->on_cpu = 0;
1024 #endif
1025 #ifdef CONFIG_DEBUG_SPINLOCK
1026 /* this is a valid case when another task releases the spinlock */
1027 rq->lock.owner = current;
1028 #endif
1029 /*
1030 * If we are tracking spinlock dependencies then we have to
1031 * fix up the runqueue lock - which gets 'carried over' from
1032 * prev into current:
1033 */
1034 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
1035
1036 raw_spin_unlock_irq(&rq->lock);
1037 }
1038
1039 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
1040 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1041 {
1042 #ifdef CONFIG_SMP
1043 /*
1044 * We can optimise this out completely for !SMP, because the
1045 * SMP rebalancing from interrupt is the only thing that cares
1046 * here.
1047 */
1048 next->on_cpu = 1;
1049 #endif
1050 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1051 raw_spin_unlock_irq(&rq->lock);
1052 #else
1053 raw_spin_unlock(&rq->lock);
1054 #endif
1055 }
1056
1057 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1058 {
1059 #ifdef CONFIG_SMP
1060 /*
1061 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1062 * We must ensure this doesn't happen until the switch is completely
1063 * finished.
1064 */
1065 smp_wmb();
1066 prev->on_cpu = 0;
1067 #endif
1068 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1069 local_irq_enable();
1070 #endif
1071 }
1072 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
1073
1074 /*
1075 * __task_rq_lock - lock the rq @p resides on.
1076 */
1077 static inline struct rq *__task_rq_lock(struct task_struct *p)
1078 __acquires(rq->lock)
1079 {
1080 struct rq *rq;
1081
1082 lockdep_assert_held(&p->pi_lock);
1083
1084 for (;;) {
1085 rq = task_rq(p);
1086 raw_spin_lock(&rq->lock);
1087 if (likely(rq == task_rq(p)))
1088 return rq;
1089 raw_spin_unlock(&rq->lock);
1090 }
1091 }
1092
1093 /*
1094 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
1095 */
1096 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
1097 __acquires(p->pi_lock)
1098 __acquires(rq->lock)
1099 {
1100 struct rq *rq;
1101
1102 for (;;) {
1103 raw_spin_lock_irqsave(&p->pi_lock, *flags);
1104 rq = task_rq(p);
1105 raw_spin_lock(&rq->lock);
1106 if (likely(rq == task_rq(p)))
1107 return rq;
1108 raw_spin_unlock(&rq->lock);
1109 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1110 }
1111 }
1112
1113 static void __task_rq_unlock(struct rq *rq)
1114 __releases(rq->lock)
1115 {
1116 raw_spin_unlock(&rq->lock);
1117 }
1118
1119 static inline void
1120 task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
1121 __releases(rq->lock)
1122 __releases(p->pi_lock)
1123 {
1124 raw_spin_unlock(&rq->lock);
1125 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1126 }
1127
1128 /*
1129 * this_rq_lock - lock this runqueue and disable interrupts.
1130 */
1131 static struct rq *this_rq_lock(void)
1132 __acquires(rq->lock)
1133 {
1134 struct rq *rq;
1135
1136 local_irq_disable();
1137 rq = this_rq();
1138 raw_spin_lock(&rq->lock);
1139
1140 return rq;
1141 }
1142
1143 #ifdef CONFIG_SCHED_HRTICK
1144 /*
1145 * Use HR-timers to deliver accurate preemption points.
1146 *
1147 * It's all a bit involved since we cannot program an hrtimer while holding the
1148 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
1149 * reschedule event.
1150 *
1151 * When we get rescheduled we reprogram the hrtick_timer outside of the
1152 * rq->lock.
1153 */
1154
1155 /*
1156 * Use hrtick when:
1157 * - enabled by features
1158 * - hrtimer is actually high res
1159 */
1160 static inline int hrtick_enabled(struct rq *rq)
1161 {
1162 if (!sched_feat(HRTICK))
1163 return 0;
1164 if (!cpu_active(cpu_of(rq)))
1165 return 0;
1166 return hrtimer_is_hres_active(&rq->hrtick_timer);
1167 }
1168
1169 static void hrtick_clear(struct rq *rq)
1170 {
1171 if (hrtimer_active(&rq->hrtick_timer))
1172 hrtimer_cancel(&rq->hrtick_timer);
1173 }
1174
1175 /*
1176 * High-resolution timer tick.
1177 * Runs from hardirq context with interrupts disabled.
1178 */
1179 static enum hrtimer_restart hrtick(struct hrtimer *timer)
1180 {
1181 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1182
1183 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1184
1185 raw_spin_lock(&rq->lock);
1186 update_rq_clock(rq);
1187 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1188 raw_spin_unlock(&rq->lock);
1189
1190 return HRTIMER_NORESTART;
1191 }
1192
1193 #ifdef CONFIG_SMP
1194 /*
1195 * called from hardirq (IPI) context
1196 */
1197 static void __hrtick_start(void *arg)
1198 {
1199 struct rq *rq = arg;
1200
1201 raw_spin_lock(&rq->lock);
1202 hrtimer_restart(&rq->hrtick_timer);
1203 rq->hrtick_csd_pending = 0;
1204 raw_spin_unlock(&rq->lock);
1205 }
1206
1207 /*
1208 * Called to set the hrtick timer state.
1209 *
1210 * called with rq->lock held and irqs disabled
1211 */
1212 static void hrtick_start(struct rq *rq, u64 delay)
1213 {
1214 struct hrtimer *timer = &rq->hrtick_timer;
1215 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
1216
1217 hrtimer_set_expires(timer, time);
1218
1219 if (rq == this_rq()) {
1220 hrtimer_restart(timer);
1221 } else if (!rq->hrtick_csd_pending) {
1222 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
1223 rq->hrtick_csd_pending = 1;
1224 }
1225 }
1226
1227 static int
1228 hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1229 {
1230 int cpu = (int)(long)hcpu;
1231
1232 switch (action) {
1233 case CPU_UP_CANCELED:
1234 case CPU_UP_CANCELED_FROZEN:
1235 case CPU_DOWN_PREPARE:
1236 case CPU_DOWN_PREPARE_FROZEN:
1237 case CPU_DEAD:
1238 case CPU_DEAD_FROZEN:
1239 hrtick_clear(cpu_rq(cpu));
1240 return NOTIFY_OK;
1241 }
1242
1243 return NOTIFY_DONE;
1244 }
1245
1246 static __init void init_hrtick(void)
1247 {
1248 hotcpu_notifier(hotplug_hrtick, 0);
1249 }
1250 #else
1251 /*
1252 * Called to set the hrtick timer state.
1253 *
1254 * called with rq->lock held and irqs disabled
1255 */
1256 static void hrtick_start(struct rq *rq, u64 delay)
1257 {
1258 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
1259 HRTIMER_MODE_REL_PINNED, 0);
1260 }
1261
1262 static inline void init_hrtick(void)
1263 {
1264 }
1265 #endif /* CONFIG_SMP */
1266
1267 static void init_rq_hrtick(struct rq *rq)
1268 {
1269 #ifdef CONFIG_SMP
1270 rq->hrtick_csd_pending = 0;
1271
1272 rq->hrtick_csd.flags = 0;
1273 rq->hrtick_csd.func = __hrtick_start;
1274 rq->hrtick_csd.info = rq;
1275 #endif
1276
1277 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1278 rq->hrtick_timer.function = hrtick;
1279 }
1280 #else /* CONFIG_SCHED_HRTICK */
1281 static inline void hrtick_clear(struct rq *rq)
1282 {
1283 }
1284
1285 static inline void init_rq_hrtick(struct rq *rq)
1286 {
1287 }
1288
1289 static inline void init_hrtick(void)
1290 {
1291 }
1292 #endif /* CONFIG_SCHED_HRTICK */
1293
1294 /*
1295 * resched_task - mark a task 'to be rescheduled now'.
1296 *
1297 * On UP this means the setting of the need_resched flag, on SMP it
1298 * might also involve a cross-CPU call to trigger the scheduler on
1299 * the target CPU.
1300 */
1301 #ifdef CONFIG_SMP
1302
1303 #ifndef tsk_is_polling
1304 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1305 #endif
1306
1307 static void resched_task(struct task_struct *p)
1308 {
1309 int cpu;
1310
1311 assert_raw_spin_locked(&task_rq(p)->lock);
1312
1313 if (test_tsk_need_resched(p))
1314 return;
1315
1316 set_tsk_need_resched(p);
1317
1318 cpu = task_cpu(p);
1319 if (cpu == smp_processor_id())
1320 return;
1321
1322 /* NEED_RESCHED must be visible before we test polling */
1323 smp_mb();
1324 if (!tsk_is_polling(p))
1325 smp_send_reschedule(cpu);
1326 }
1327
1328 static void resched_cpu(int cpu)
1329 {
1330 struct rq *rq = cpu_rq(cpu);
1331 unsigned long flags;
1332
1333 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
1334 return;
1335 resched_task(cpu_curr(cpu));
1336 raw_spin_unlock_irqrestore(&rq->lock, flags);
1337 }
1338
1339 #ifdef CONFIG_NO_HZ
1340 /*
1341 * In the semi idle case, use the nearest busy cpu for migrating timers
1342 * from an idle cpu. This is good for power-savings.
1343 *
1344 * We don't do a similar optimization for a completely idle system, as
1345 * selecting an idle cpu will add more delays to the timers than intended
1346 * (as that cpu's timer base may not be up to date wrt jiffies etc).
1347 */
1348 int get_nohz_timer_target(void)
1349 {
1350 int cpu = smp_processor_id();
1351 int i;
1352 struct sched_domain *sd;
1353
1354 rcu_read_lock();
1355 for_each_domain(cpu, sd) {
1356 for_each_cpu(i, sched_domain_span(sd)) {
1357 if (!idle_cpu(i)) {
1358 cpu = i;
1359 goto unlock;
1360 }
1361 }
1362 }
1363 unlock:
1364 rcu_read_unlock();
1365 return cpu;
1366 }
1367 /*
1368 * When add_timer_on() enqueues a timer into the timer wheel of an
1369 * idle CPU then this timer might expire before the next timer event
1370 * which is scheduled to wake up that CPU. In case of a completely
1371 * idle system the next event might even be infinite time into the
1372 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1373 * leaves the inner idle loop so the newly added timer is taken into
1374 * account when the CPU goes back to idle and evaluates the timer
1375 * wheel for the next timer event.
1376 */
1377 void wake_up_idle_cpu(int cpu)
1378 {
1379 struct rq *rq = cpu_rq(cpu);
1380
1381 if (cpu == smp_processor_id())
1382 return;
1383
1384 /*
1385 * This is safe, as this function is called with the timer
1386 * wheel base lock of (cpu) held. When the CPU is on the way
1387 * to idle and has not yet set rq->curr to idle then it will
1388 * be serialized on the timer wheel base lock and take the new
1389 * timer into account automatically.
1390 */
1391 if (rq->curr != rq->idle)
1392 return;
1393
1394 /*
1395 * We can set TIF_RESCHED on the idle task of the other CPU
1396 * locklessly. The worst case is that the other CPU runs the
1397 * idle task through an additional NOOP schedule().
1398 */
1399 set_tsk_need_resched(rq->idle);
1400
1401 /* NEED_RESCHED must be visible before we test polling */
1402 smp_mb();
1403 if (!tsk_is_polling(rq->idle))
1404 smp_send_reschedule(cpu);
1405 }
1406
1407 static inline bool got_nohz_idle_kick(void)
1408 {
1409 return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick;
1410 }
1411
1412 #else /* CONFIG_NO_HZ */
1413
1414 static inline bool got_nohz_idle_kick(void)
1415 {
1416 return false;
1417 }
1418
1419 #endif /* CONFIG_NO_HZ */
1420
1421 static u64 sched_avg_period(void)
1422 {
1423 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1424 }
1425
1426 static void sched_avg_update(struct rq *rq)
1427 {
1428 s64 period = sched_avg_period();
1429
1430 while ((s64)(rq->clock - rq->age_stamp) > period) {
1431 /*
1432 * Inline assembly required to prevent the compiler
1433 * optimising this loop into a divmod call.
1434 * See __iter_div_u64_rem() for another example of this.
1435 */
1436 asm("" : "+rm" (rq->age_stamp));
1437 rq->age_stamp += period;
1438 rq->rt_avg /= 2;
1439 }
1440 }
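/*
 * Numeric sketch: with the default sysctl_sched_time_avg of 1000 ms,
 * sched_avg_period() returns 500,000,000 ns, so sched_avg_update()
 * halves rq->rt_avg for each elapsed 0.5 s window - an exponential
 * decay of the accumulated RT time.
 */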
1441
1442 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1443 {
1444 rq->rt_avg += rt_delta;
1445 sched_avg_update(rq);
1446 }
1447
1448 #else /* !CONFIG_SMP */
1449 static void resched_task(struct task_struct *p)
1450 {
1451 assert_raw_spin_locked(&task_rq(p)->lock);
1452 set_tsk_need_resched(p);
1453 }
1454
1455 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1456 {
1457 }
1458
1459 static void sched_avg_update(struct rq *rq)
1460 {
1461 }
1462 #endif /* CONFIG_SMP */
1463
1464 #if BITS_PER_LONG == 32
1465 # define WMULT_CONST (~0UL)
1466 #else
1467 # define WMULT_CONST (1UL << 32)
1468 #endif
1469
1470 #define WMULT_SHIFT 32
1471
1472 /*
1473 * Shift right and round:
1474 */
1475 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
1476
1477 /*
1478 * delta *= weight / lw
1479 */
1480 static unsigned long
1481 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1482 struct load_weight *lw)
1483 {
1484 u64 tmp;
1485
1486 /*
1487 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
1488 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
1489 * 2^SCHED_LOAD_RESOLUTION.
1490 */
1491 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
1492 tmp = (u64)delta_exec * scale_load_down(weight);
1493 else
1494 tmp = (u64)delta_exec;
1495
1496 if (!lw->inv_weight) {
1497 unsigned long w = scale_load_down(lw->weight);
1498
1499 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
1500 lw->inv_weight = 1;
1501 else if (unlikely(!w))
1502 lw->inv_weight = WMULT_CONST;
1503 else
1504 lw->inv_weight = WMULT_CONST / w;
1505 }
1506
1507 /*
1508 * Check whether we'd overflow the 64-bit multiplication:
1509 */
1510 if (unlikely(tmp > WMULT_CONST))
1511 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
1512 WMULT_SHIFT/2);
1513 else
1514 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
1515
1516 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
1517 }
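/*
 * Worked example (a sketch, assuming SCHED_LOAD_RESOLUTION == 0 so
 * scale_load_down() is a no-op): a nice-0 entity (weight 1024) on a
 * queue whose total load is 2048 (e.g. two nice-0 tasks), with
 * delta_exec = 4,000,000 ns:
 *
 *	inv_weight = 2^32 / 2048      = 2,097,152
 *	tmp        = 4,000,000 * 1024 = 4,096,000,000
 *	result     = SRR(tmp * inv_weight, 32) ~= 2,000,000 ns
 *
 * i.e. delta * weight / lw->weight, computed with a multiply and shift
 * instead of a 64-bit division.
 */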
1518
1519 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
1520 {
1521 lw->weight += inc;
1522 lw->inv_weight = 0;
1523 }
1524
1525 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
1526 {
1527 lw->weight -= dec;
1528 lw->inv_weight = 0;
1529 }
1530
1531 static inline void update_load_set(struct load_weight *lw, unsigned long w)
1532 {
1533 lw->weight = w;
1534 lw->inv_weight = 0;
1535 }
1536
1537 /*
1538 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1539 * of tasks with abnormal "nice" values across CPUs the contribution that
1540 * each task makes to its run queue's load is weighted according to its
1541 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1542 * scaled version of the new time slice allocation that they receive on time
1543 * slice expiry etc.
1544 */
1545
1546 #define WEIGHT_IDLEPRIO 3
1547 #define WMULT_IDLEPRIO 1431655765
1548
1549 /*
1550 * Nice levels are multiplicative, with a gentle 10% change for every
1551 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1552 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1553 * that remained on nice 0.
1554 *
1555 * The "10% effect" is relative and cumulative: from _any_ nice level,
1556 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
1557 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1558 * If a task goes up by ~10% and another task goes down by ~10% then
1559 * the relative distance between them is ~25%.)
1560 */
1561 static const int prio_to_weight[40] = {
1562 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1563 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1564 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1565 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1566 /* 0 */ 1024, 820, 655, 526, 423,
1567 /* 5 */ 335, 272, 215, 172, 137,
1568 /* 10 */ 110, 87, 70, 56, 45,
1569 /* 15 */ 36, 29, 23, 18, 15,
1570 };
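/*
 * Worked example from the table above: nice 0 -> 1024, nice 1 -> 820,
 * and 1024 / 820 ~= 1.25. Two CPU-bound tasks at nice 0 and nice 1
 * therefore split the CPU roughly 1024:820, i.e. ~55% vs ~45% - the
 * "10% effect" described above.
 */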
1571
1572 /*
1573 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1574 *
1575 * In cases where the weight does not change often, we can use the
1576 * precalculated inverse to speed up arithmetics by turning divisions
1577 * into multiplications:
1578 */
1579 static const u32 prio_to_wmult[40] = {
1580 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1581 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1582 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1583 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1584 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1585 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1586 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1587 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1588 };
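/*
 * Worked example from the table above: for nice 0, 2^32 / 1024 == 4194304,
 * and for nice 1, 2^32 / 820 ~= 5237765 - exactly the entries shown.
 * calc_delta_mine() can thus replace a division by the weight with a
 * multiplication by inv_weight followed by a 32-bit shift.
 */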
1589
1590 /* Time spent by the tasks of the cpu accounting group executing in ... */
1591 enum cpuacct_stat_index {
1592 CPUACCT_STAT_USER, /* ... user mode */
1593 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1594
1595 CPUACCT_STAT_NSTATS,
1596 };
1597
1598 #ifdef CONFIG_CGROUP_CPUACCT
1599 static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
1600 static void cpuacct_update_stats(struct task_struct *tsk,
1601 enum cpuacct_stat_index idx, cputime_t val);
1602 #else
1603 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
1604 static inline void cpuacct_update_stats(struct task_struct *tsk,
1605 enum cpuacct_stat_index idx, cputime_t val) {}
1606 #endif
1607
1608 static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1609 {
1610 update_load_add(&rq->load, load);
1611 }
1612
1613 static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1614 {
1615 update_load_sub(&rq->load, load);
1616 }
1617
1618 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1619 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1620 typedef int (*tg_visitor)(struct task_group *, void *);
1621
1622 /*
1623 * Iterate task_group tree rooted at *from, calling @down when first entering a
1624 * node and @up when leaving it for the final time.
1625 *
1626 * Caller must hold rcu_lock or sufficient equivalent.
1627 */
1628 static int walk_tg_tree_from(struct task_group *from,
1629 tg_visitor down, tg_visitor up, void *data)
1630 {
1631 struct task_group *parent, *child;
1632 int ret;
1633
1634 parent = from;
1635
1636 down:
1637 ret = (*down)(parent, data);
1638 if (ret)
1639 goto out;
1640 list_for_each_entry_rcu(child, &parent->children, siblings) {
1641 parent = child;
1642 goto down;
1643
1644 up:
1645 continue;
1646 }
1647 ret = (*up)(parent, data);
1648 if (ret || parent == from)
1649 goto out;
1650
1651 child = parent;
1652 parent = parent->parent;
1653 if (parent)
1654 goto up;
1655 out:
1656 return ret;
1657 }
1658
1659 /*
1660 * Iterate the full tree, calling @down when first entering a node and @up when
1661 * leaving it for the final time.
1662 *
1663 * Caller must hold rcu_lock or sufficient equivalent.
1664 */
1665
1666 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1667 {
1668 return walk_tg_tree_from(&root_task_group, down, up, data);
1669 }
1670
1671 static int tg_nop(struct task_group *tg, void *data)
1672 {
1673 return 0;
1674 }
1675 #endif
1676
1677 #ifdef CONFIG_SMP
1678 /* Used instead of source_load when we know the type == 0 */
1679 static unsigned long weighted_cpuload(const int cpu)
1680 {
1681 return cpu_rq(cpu)->load.weight;
1682 }
1683
1684 /*
1685 * Return a low guess at the load of a migration-source cpu weighted
1686 * according to the scheduling class and "nice" value.
1687 *
1688 * We want to under-estimate the load of migration sources, to
1689 * balance conservatively.
1690 */
1691 static unsigned long source_load(int cpu, int type)
1692 {
1693 struct rq *rq = cpu_rq(cpu);
1694 unsigned long total = weighted_cpuload(cpu);
1695
1696 if (type == 0 || !sched_feat(LB_BIAS))
1697 return total;
1698
1699 return min(rq->cpu_load[type-1], total);
1700 }
1701
1702 /*
1703 * Return a high guess at the load of a migration-target cpu weighted
1704 * according to the scheduling class and "nice" value.
1705 */
1706 static unsigned long target_load(int cpu, int type)
1707 {
1708 struct rq *rq = cpu_rq(cpu);
1709 unsigned long total = weighted_cpuload(cpu);
1710
1711 if (type == 0 || !sched_feat(LB_BIAS))
1712 return total;
1713
1714 return max(rq->cpu_load[type-1], total);
1715 }
1716
1717 static unsigned long power_of(int cpu)
1718 {
1719 return cpu_rq(cpu)->cpu_power;
1720 }
1721
1722 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1723
1724 static unsigned long cpu_avg_load_per_task(int cpu)
1725 {
1726 struct rq *rq = cpu_rq(cpu);
1727 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
1728
1729 if (nr_running)
1730 return rq->load.weight / nr_running;
1731
1732 return 0;
1733 }
1734
1735 #ifdef CONFIG_PREEMPT
1736
1737 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1738
1739 /*
1740 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1741 * way at the expense of forcing extra atomic operations in all
1742 * invocations. This assures that the double_lock is acquired using the
1743 * same underlying policy as the spinlock_t on this architecture, which
1744 * reduces latency compared to the unfair variant below. However, it
1745 * also adds more overhead and therefore may reduce throughput.
1746 */
1747 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1748 __releases(this_rq->lock)
1749 __acquires(busiest->lock)
1750 __acquires(this_rq->lock)
1751 {
1752 raw_spin_unlock(&this_rq->lock);
1753 double_rq_lock(this_rq, busiest);
1754
1755 return 1;
1756 }
1757
1758 #else
1759 /*
1760 * Unfair double_lock_balance: Optimizes throughput at the expense of
1761 * latency by eliminating extra atomic operations when the locks are
1762 * already in proper order on entry. This favors lower cpu-ids and will
1763 * grant the double lock to lower cpus over higher ids under contention,
1764 * regardless of entry order into the function.
1765 */
1766 static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1767 __releases(this_rq->lock)
1768 __acquires(busiest->lock)
1769 __acquires(this_rq->lock)
1770 {
1771 int ret = 0;
1772
1773 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1774 if (busiest < this_rq) {
1775 raw_spin_unlock(&this_rq->lock);
1776 raw_spin_lock(&busiest->lock);
1777 raw_spin_lock_nested(&this_rq->lock,
1778 SINGLE_DEPTH_NESTING);
1779 ret = 1;
1780 } else
1781 raw_spin_lock_nested(&busiest->lock,
1782 SINGLE_DEPTH_NESTING);
1783 }
1784 return ret;
1785 }
1786
1787 #endif /* CONFIG_PREEMPT */
1788
1789 /*
1790 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1791 */
1792 static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1793 {
1794 if (unlikely(!irqs_disabled())) {
1795 /* printk() doesn't work well under rq->lock */
1796 raw_spin_unlock(&this_rq->lock);
1797 BUG_ON(1);
1798 }
1799
1800 return _double_lock_balance(this_rq, busiest);
1801 }
1802
1803 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1804 __releases(busiest->lock)
1805 {
1806 raw_spin_unlock(&busiest->lock);
1807 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1808 }
1809
1810 /*
1811 * double_rq_lock - safely lock two runqueues
1812 *
1813 * Note this does not disable interrupts like task_rq_lock,
1814 * you need to do so manually before calling.
1815 */
1816 static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1817 __acquires(rq1->lock)
1818 __acquires(rq2->lock)
1819 {
1820 BUG_ON(!irqs_disabled());
1821 if (rq1 == rq2) {
1822 raw_spin_lock(&rq1->lock);
1823 __acquire(rq2->lock); /* Fake it out ;) */
1824 } else {
1825 if (rq1 < rq2) {
1826 raw_spin_lock(&rq1->lock);
1827 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1828 } else {
1829 raw_spin_lock(&rq2->lock);
1830 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1831 }
1832 }
1833 }
1834
1835 /*
1836 * double_rq_unlock - safely unlock two runqueues
1837 *
1838 * Note this does not restore interrupts like task_rq_unlock,
1839 * you need to do so manually after calling.
1840 */
1841 static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1842 __releases(rq1->lock)
1843 __releases(rq2->lock)
1844 {
1845 raw_spin_unlock(&rq1->lock);
1846 if (rq1 != rq2)
1847 raw_spin_unlock(&rq2->lock);
1848 else
1849 __release(rq2->lock);
1850 }
1851
1852 #else /* CONFIG_SMP */
1853
1854 /*
1855 * double_rq_lock - safely lock two runqueues
1856 *
1857 * Note this does not disable interrupts like task_rq_lock,
1858 * you need to do so manually before calling.
1859 */
1860 static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1861 __acquires(rq1->lock)
1862 __acquires(rq2->lock)
1863 {
1864 BUG_ON(!irqs_disabled());
1865 BUG_ON(rq1 != rq2);
1866 raw_spin_lock(&rq1->lock);
1867 __acquire(rq2->lock); /* Fake it out ;) */
1868 }
1869
1870 /*
1871 * double_rq_unlock - safely unlock two runqueues
1872 *
1873 * Note this does not restore interrupts like task_rq_unlock,
1874 * you need to do so manually after calling.
1875 */
1876 static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1877 __releases(rq1->lock)
1878 __releases(rq2->lock)
1879 {
1880 BUG_ON(rq1 != rq2);
1881 raw_spin_unlock(&rq1->lock);
1882 __release(rq2->lock);
1883 }
1884
1885 #endif
1886
1887 static void calc_load_account_idle(struct rq *this_rq);
1888 static void update_sysctl(void);
1889 static int get_update_sysctl_factor(void);
1890 static void update_cpu_load(struct rq *this_rq);
1891
1892 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1893 {
1894 set_task_rq(p, cpu);
1895 #ifdef CONFIG_SMP
1896 /*
1897 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1898 * successfully executed on another CPU. We must ensure that updates of
1899 * per-task data have been completed by this moment.
1900 */
1901 smp_wmb();
1902 task_thread_info(p)->cpu = cpu;
1903 #endif
1904 }
1905
1906 static const struct sched_class rt_sched_class;
1907
1908 #define sched_class_highest (&stop_sched_class)
1909 #define for_each_class(class) \
1910 for (class = sched_class_highest; class; class = class->next)
1911
1912 #include "sched_stats.h"
1913
1914 static void inc_nr_running(struct rq *rq)
1915 {
1916 rq->nr_running++;
1917 }
1918
1919 static void dec_nr_running(struct rq *rq)
1920 {
1921 rq->nr_running--;
1922 }
1923
1924 static void set_load_weight(struct task_struct *p)
1925 {
1926 int prio = p->static_prio - MAX_RT_PRIO;
1927 struct load_weight *load = &p->se.load;
1928
1929 /*
1930 * SCHED_IDLE tasks get minimal weight:
1931 */
1932 if (p->policy == SCHED_IDLE) {
1933 load->weight = scale_load(WEIGHT_IDLEPRIO);
1934 load->inv_weight = WMULT_IDLEPRIO;
1935 return;
1936 }
1937
1938 load->weight = scale_load(prio_to_weight[prio]);
1939 load->inv_weight = prio_to_wmult[prio];
1940 }
1941
1942 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1943 {
1944 update_rq_clock(rq);
1945 sched_info_queued(p);
1946 p->sched_class->enqueue_task(rq, p, flags);
1947 }
1948
1949 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1950 {
1951 update_rq_clock(rq);
1952 sched_info_dequeued(p);
1953 p->sched_class->dequeue_task(rq, p, flags);
1954 }
1955
1956 /*
1957 * activate_task - move a task to the runqueue.
1958 */
1959 static void activate_task(struct rq *rq, struct task_struct *p, int flags)
1960 {
1961 if (task_contributes_to_load(p))
1962 rq->nr_uninterruptible--;
1963
1964 enqueue_task(rq, p, flags);
1965 }
1966
1967 /*
1968 * deactivate_task - remove a task from the runqueue.
1969 */
1970 static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1971 {
1972 if (task_contributes_to_load(p))
1973 rq->nr_uninterruptible++;
1974
1975 dequeue_task(rq, p, flags);
1976 }
1977
1978 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1979
1980 /*
1981 * There are no locks covering percpu hardirq/softirq time.
1982 * They are only modified in account_system_vtime, on the corresponding CPU
1983 * with interrupts disabled, so writes are safe.
1984 * They are read and saved off onto struct rq in update_rq_clock().
1985 * This means another CPU may read this CPU's irq time and race with
1986 * irq/account_system_vtime on this CPU. We would either get the old or the
1987 * new value, with the side effect of accounting a slice of irq time to the
1988 * wrong task when an irq is in progress while we read rq->clock. That is a
1989 * worthy compromise in place of having locks on each irq in account_system_time.
1990 */
1991 static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1992 static DEFINE_PER_CPU(u64, cpu_softirq_time);
1993
1994 static DEFINE_PER_CPU(u64, irq_start_time);
1995 static int sched_clock_irqtime;
1996
1997 void enable_sched_clock_irqtime(void)
1998 {
1999 sched_clock_irqtime = 1;
2000 }
2001
2002 void disable_sched_clock_irqtime(void)
2003 {
2004 sched_clock_irqtime = 0;
2005 }
2006
2007 #ifndef CONFIG_64BIT
2008 static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
2009
2010 static inline void irq_time_write_begin(void)
2011 {
2012 __this_cpu_inc(irq_time_seq.sequence);
2013 smp_wmb();
2014 }
2015
2016 static inline void irq_time_write_end(void)
2017 {
2018 smp_wmb();
2019 __this_cpu_inc(irq_time_seq.sequence);
2020 }
2021
2022 static inline u64 irq_time_read(int cpu)
2023 {
2024 u64 irq_time;
2025 unsigned seq;
2026
2027 do {
2028 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
2029 irq_time = per_cpu(cpu_softirq_time, cpu) +
2030 per_cpu(cpu_hardirq_time, cpu);
2031 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
2032
2033 return irq_time;
2034 }
2035 #else /* CONFIG_64BIT */
2036 static inline void irq_time_write_begin(void)
2037 {
2038 }
2039
2040 static inline void irq_time_write_end(void)
2041 {
2042 }
2043
2044 static inline u64 irq_time_read(int cpu)
2045 {
2046 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
2047 }
2048 #endif /* CONFIG_64BIT */
2049
2050 /*
2051 * Called before incrementing preempt_count on {soft,}irq_enter
2052 * and before decrementing preempt_count on {soft,}irq_exit.
2053 */
2054 void account_system_vtime(struct task_struct *curr)
2055 {
2056 unsigned long flags;
2057 s64 delta;
2058 int cpu;
2059
2060 if (!sched_clock_irqtime)
2061 return;
2062
2063 local_irq_save(flags);
2064
2065 cpu = smp_processor_id();
2066 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
2067 __this_cpu_add(irq_start_time, delta);
2068
2069 irq_time_write_begin();
2070 /*
2071 * We do not account for softirq time from ksoftirqd here.
2072 * We want to continue accounting softirq time to the ksoftirqd thread
2073 * in that case, so as not to confuse the scheduler with a special task
2074 * that does not consume any time but still wants to run.
2075 */
2076 if (hardirq_count())
2077 __this_cpu_add(cpu_hardirq_time, delta);
2078 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
2079 __this_cpu_add(cpu_softirq_time, delta);
2080
2081 irq_time_write_end();
2082 local_irq_restore(flags);
2083 }
2084 EXPORT_SYMBOL_GPL(account_system_vtime);
2085
2086 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2087
2088 #ifdef CONFIG_PARAVIRT
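/*
 * Convert a steal-time delta in nanoseconds into a number of ticks,
 * using the cheaper remainder-based division when the delta is below
 * one second.
 */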
2089 static inline u64 steal_ticks(u64 steal)
2090 {
2091 if (unlikely(steal > NSEC_PER_SEC))
2092 return div_u64(steal, TICK_NSEC);
2093
2094 return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
2095 }
2096 #endif
2097
2098 static void update_rq_clock_task(struct rq *rq, s64 delta)
2099 {
2100 /*
2101 * In theory, the compiler should just see 0 here, and optimize out the call
2102 * to sched_rt_avg_update. But I don't trust it...
2103 */
2104 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
2105 s64 steal = 0, irq_delta = 0;
2106 #endif
2107 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2108 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
2109
2110 /*
2111 * Since irq_time is only updated on {soft,}irq_exit, we might run into
2112 * this case when a previous update_rq_clock() happened inside a
2113 * {soft,}irq region.
2114 *
2115 * When this happens, we stop ->clock_task and only update the
2116 * prev_irq_time stamp to account for the part that fit, so that a next
2117 * update will consume the rest. This ensures ->clock_task is
2118 * monotonic.
2119 *
2120 * It does however cause some slight mis-attribution of {soft,}irq
2121 * time; a more accurate solution would be to update the irq_time using
2122 * the current rq->clock timestamp, except that would require using
2123 * atomic ops.
2124 */
2125 if (irq_delta > delta)
2126 irq_delta = delta;
2127
2128 rq->prev_irq_time += irq_delta;
2129 delta -= irq_delta;
2130 #endif
2131 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
2132 if (static_branch((&paravirt_steal_rq_enabled))) {
2133 u64 st;
2134
2135 steal = paravirt_steal_clock(cpu_of(rq));
2136 steal -= rq->prev_steal_time_rq;
2137
2138 if (unlikely(steal > delta))
2139 steal = delta;
2140
2141 st = steal_ticks(steal);
2142 steal = st * TICK_NSEC;
2143
2144 rq->prev_steal_time_rq += steal;
2145
2146 delta -= steal;
2147 }
2148 #endif
2149
2150 rq->clock_task += delta;
2151
2152 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
2153 if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
2154 sched_rt_avg_update(rq, irq_delta + steal);
2155 #endif
2156 }
2157
2158 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
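/*
 * irqtime_account_{hi,si}_update() return 1 when the accumulated per-cpu
 * {hard,soft}irq time has advanced past what is already folded into
 * cpustat->{irq,softirq}, i.e. the current tick should be accounted as
 * {hard,soft}irq time rather than to the running task.
 */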
2159 static int irqtime_account_hi_update(void)
2160 {
2161 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2162 unsigned long flags;
2163 u64 latest_ns;
2164 int ret = 0;
2165
2166 local_irq_save(flags);
2167 latest_ns = this_cpu_read(cpu_hardirq_time);
2168 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
2169 ret = 1;
2170 local_irq_restore(flags);
2171 return ret;
2172 }
2173
2174 static int irqtime_account_si_update(void)
2175 {
2176 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2177 unsigned long flags;
2178 u64 latest_ns;
2179 int ret = 0;
2180
2181 local_irq_save(flags);
2182 latest_ns = this_cpu_read(cpu_softirq_time);
2183 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
2184 ret = 1;
2185 local_irq_restore(flags);
2186 return ret;
2187 }
2188
2189 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
2190
2191 #define sched_clock_irqtime (0)
2192
2193 #endif
2194
2195 #include "sched_idletask.c"
2196 #include "sched_fair.c"
2197 #include "sched_rt.c"
2198 #include "sched_autogroup.c"
2199 #include "sched_stoptask.c"
2200 #ifdef CONFIG_SCHED_DEBUG
2201 # include "sched_debug.c"
2202 #endif
2203
2204 void sched_set_stop_task(int cpu, struct task_struct *stop)
2205 {
2206 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2207 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2208
2209 if (stop) {
2210 /*
2211 * Make it appear like a SCHED_FIFO task; it's something
2212 * userspace knows about and won't get confused by.
2213 *
2214 * Also, it will make PI more or less work without too
2215 * much confusion -- but then, stop work should not
2216 * rely on PI working anyway.
2217 */
2218 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2219
2220 stop->sched_class = &stop_sched_class;
2221 }
2222
2223 cpu_rq(cpu)->stop = stop;
2224
2225 if (old_stop) {
2226 /*
2227 * Reset it back to a normal scheduling class so that
2228 * it can die in pieces.
2229 */
2230 old_stop->sched_class = &rt_sched_class;
2231 }
2232 }
2233
2234 /*
2235 * __normal_prio - return the priority that is based on the static prio
2236 */
2237 static inline int __normal_prio(struct task_struct *p)
2238 {
2239 return p->static_prio;
2240 }
2241
2242 /*
2243 * Calculate the expected normal priority: i.e. priority
2244 * without taking RT-inheritance into account. Might be
2245 * boosted by interactivity modifiers. Changes upon fork,
2246 * setprio syscalls, and whenever the interactivity
2247 * estimator recalculates.
2248 */
2249 static inline int normal_prio(struct task_struct *p)
2250 {
2251 int prio;
2252
2253 if (task_has_rt_policy(p))
2254 prio = MAX_RT_PRIO-1 - p->rt_priority;
2255 else
2256 prio = __normal_prio(p);
2257 return prio;
2258 }
2259
2260 /*
2261 * Calculate the current priority, i.e. the priority
2262 * taken into account by the scheduler. This value might
2263 * be boosted by RT tasks, or might be boosted by
2264 * interactivity modifiers. Will be RT if the task got
2265 * RT-boosted. If not then it returns p->normal_prio.
2266 */
2267 static int effective_prio(struct task_struct *p)
2268 {
2269 p->normal_prio = normal_prio(p);
2270 /*
2271 * If we are RT tasks or we were boosted to RT priority,
2272 * keep the priority unchanged. Otherwise, update priority
2273 * to the normal priority:
2274 */
2275 if (!rt_prio(p->prio))
2276 return p->normal_prio;
2277 return p->prio;
2278 }
2279
2280 /**
2281 * task_curr - is this task currently executing on a CPU?
2282 * @p: the task in question.
2283 */
2284 inline int task_curr(const struct task_struct *p)
2285 {
2286 return cpu_curr(task_cpu(p)) == p;
2287 }
2288
2289 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2290 const struct sched_class *prev_class,
2291 int oldprio)
2292 {
2293 if (prev_class != p->sched_class) {
2294 if (prev_class->switched_from)
2295 prev_class->switched_from(rq, p);
2296 p->sched_class->switched_to(rq, p);
2297 } else if (oldprio != p->prio)
2298 p->sched_class->prio_changed(rq, p, oldprio);
2299 }
2300
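/*
 * If the waking task is in the same class as rq->curr, let that class
 * decide; otherwise preempt only when the waking task's class is the
 * higher-priority one, i.e. it comes before curr's class in the class list.
 */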
2301 static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2302 {
2303 const struct sched_class *class;
2304
2305 if (p->sched_class == rq->curr->sched_class) {
2306 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2307 } else {
2308 for_each_class(class) {
2309 if (class == rq->curr->sched_class)
2310 break;
2311 if (class == p->sched_class) {
2312 resched_task(rq->curr);
2313 break;
2314 }
2315 }
2316 }
2317
2318 /*
2319 * A queue event has occurred, and we're going to schedule. In
2320 * this case, we can save a useless back to back clock update.
2321 */
2322 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
2323 rq->skip_clock_update = 1;
2324 }
2325
2326 #ifdef CONFIG_SMP
2327 /*
2328 * Is this task likely cache-hot:
2329 */
2330 static int
2331 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2332 {
2333 s64 delta;
2334
2335 if (p->sched_class != &fair_sched_class)
2336 return 0;
2337
2338 if (unlikely(p->policy == SCHED_IDLE))
2339 return 0;
2340
2341 /*
2342 * Buddy candidates are cache hot:
2343 */
2344 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
2345 (&p->se == cfs_rq_of(&p->se)->next ||
2346 &p->se == cfs_rq_of(&p->se)->last))
2347 return 1;
2348
2349 if (sysctl_sched_migration_cost == -1)
2350 return 1;
2351 if (sysctl_sched_migration_cost == 0)
2352 return 0;
2353
2354 delta = now - p->se.exec_start;
2355
2356 return delta < (s64)sysctl_sched_migration_cost;
2357 }
2358
2359 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2360 {
2361 #ifdef CONFIG_SCHED_DEBUG
2362 /*
2363 * We should never call set_task_cpu() on a blocked task,
2364 * ttwu() will sort out the placement.
2365 */
2366 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2367 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2368
2369 #ifdef CONFIG_LOCKDEP
2370 /*
2371 * The caller should hold either p->pi_lock or rq->lock, when changing
2372 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2373 *
2374 * sched_move_task() holds both and thus holding either pins the cgroup,
2375 * see set_task_rq().
2376 *
2377 * Furthermore, all task_rq users should acquire both locks, see
2378 * task_rq_lock().
2379 */
2380 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2381 lockdep_is_held(&task_rq(p)->lock)));
2382 #endif
2383 #endif
2384
2385 trace_sched_migrate_task(p, new_cpu);
2386
2387 if (task_cpu(p) != new_cpu) {
2388 p->se.nr_migrations++;
2389 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
2390 }
2391
2392 __set_task_cpu(p, new_cpu);
2393 }
2394
2395 struct migration_arg {
2396 struct task_struct *task;
2397 int dest_cpu;
2398 };
2399
2400 static int migration_cpu_stop(void *data);
2401
2402 /*
2403 * wait_task_inactive - wait for a thread to unschedule.
2404 *
2405 * If @match_state is nonzero, it's the @p->state value just checked and
2406 * not expected to change. If it changes, i.e. @p might have woken up,
2407 * then return zero. When we succeed in waiting for @p to be off its CPU,
2408 * we return a positive number (its total switch count). If a second call
2409 * a short while later returns the same number, the caller can be sure that
2410 * @p has remained unscheduled the whole time.
2411 *
2412 * The caller must ensure that the task *will* unschedule sometime soon,
2413 * else this function might spin for a *long* time. This function can't
2414 * be called with interrupts off, or it may introduce deadlock with
2415 * smp_call_function() if an IPI is sent by the same process we are
2416 * waiting to become inactive.
2417 */
2418 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2419 {
2420 unsigned long flags;
2421 int running, on_rq;
2422 unsigned long ncsw;
2423 struct rq *rq;
2424
2425 for (;;) {
2426 /*
2427 * We do the initial early heuristics without holding
2428 * any task-queue locks at all. We'll only try to get
2429 * the runqueue lock when things look like they will
2430 * work out!
2431 */
2432 rq = task_rq(p);
2433
2434 /*
2435 * If the task is actively running on another CPU
2436 * still, just relax and busy-wait without holding
2437 * any locks.
2438 *
2439 * NOTE! Since we don't hold any locks, it's not
2440 * even sure that "rq" stays as the right runqueue!
2441 * But we don't care, since "task_running()" will
2442 * return false if the runqueue has changed and p
2443 * is actually now running somewhere else!
2444 */
2445 while (task_running(rq, p)) {
2446 if (match_state && unlikely(p->state != match_state))
2447 return 0;
2448 cpu_relax();
2449 }
2450
2451 /*
2452 * Ok, time to look more closely! We need the rq
2453 * lock now, to be *sure*. If we're wrong, we'll
2454 * just go back and repeat.
2455 */
2456 rq = task_rq_lock(p, &flags);
2457 trace_sched_wait_task(p);
2458 running = task_running(rq, p);
2459 on_rq = p->on_rq;
2460 ncsw = 0;
2461 if (!match_state || p->state == match_state)
2462 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2463 task_rq_unlock(rq, p, &flags);
2464
2465 /*
2466 * If it changed from the expected state, bail out now.
2467 */
2468 if (unlikely(!ncsw))
2469 break;
2470
2471 /*
2472 * Was it really running after all now that we
2473 * checked with the proper locks actually held?
2474 *
2475 * Oops. Go back and try again..
2476 */
2477 if (unlikely(running)) {
2478 cpu_relax();
2479 continue;
2480 }
2481
2482 /*
2483 * It's not enough that it's not actively running,
2484 * it must be off the runqueue _entirely_, and not
2485 * preempted!
2486 *
2487 * So if it was still runnable (but just not actively
2488 * running right now), it's preempted, and we should
2489 * yield - it could be a while.
2490 */
2491 if (unlikely(on_rq)) {
2492 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
2493
2494 set_current_state(TASK_UNINTERRUPTIBLE);
2495 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
2496 continue;
2497 }
2498
2499 /*
2500 * Ahh, all good. It wasn't running, and it wasn't
2501 * runnable, which means that it will never become
2502 * running in the future either. We're all done!
2503 */
2504 break;
2505 }
2506
2507 return ncsw;
2508 }
2509
2510 /***
2511 * kick_process - kick a running thread to enter/exit the kernel
2512 * @p: the to-be-kicked thread
2513 *
2514 * Cause a process which is running on another CPU to enter
2515 * kernel-mode, without any delay. (to get signals handled.)
2516 *
2517 * NOTE: this function doesn't have to take the runqueue lock,
2518 * because all it wants to ensure is that the remote task enters
2519 * the kernel. If the IPI races and the task has been migrated
2520 * to another CPU then no harm is done and the purpose has been
2521 * achieved as well.
2522 */
2523 void kick_process(struct task_struct *p)
2524 {
2525 int cpu;
2526
2527 preempt_disable();
2528 cpu = task_cpu(p);
2529 if ((cpu != smp_processor_id()) && task_curr(p))
2530 smp_send_reschedule(cpu);
2531 preempt_enable();
2532 }
2533 EXPORT_SYMBOL_GPL(kick_process);
2534 #endif /* CONFIG_SMP */
2535
2536 #ifdef CONFIG_SMP
2537 /*
2538 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
2539 */
2540 static int select_fallback_rq(int cpu, struct task_struct *p)
2541 {
2542 int dest_cpu;
2543 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2544
2545 /* Look for allowed, online CPU in same node. */
2546 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2547 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
2548 return dest_cpu;
2549
2550 /* Any allowed, online CPU? */
2551 dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
2552 if (dest_cpu < nr_cpu_ids)
2553 return dest_cpu;
2554
2555 /* No more Mr. Nice Guy. */
2556 dest_cpu = cpuset_cpus_allowed_fallback(p);
2557 /*
2558 * Don't tell them about moving exiting tasks or
2559 * kernel threads (both mm NULL), since they never
2560 * leave the kernel.
2561 */
2562 if (p->mm && printk_ratelimit()) {
2563 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2564 task_pid_nr(p), p->comm, cpu);
2565 }
2566
2567 return dest_cpu;
2568 }
2569
2570 /*
2571 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
2572 */
2573 static inline
2574 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2575 {
2576 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
2577
2578 /*
2579 * In order not to call set_task_cpu() on a blocking task we need
2580 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2581 * cpu.
2582 *
2583 * Since this is common to all placement strategies, this lives here.
2584 *
2585 * [ this allows ->select_task() to simply return task_cpu(p) and
2586 * not worry about this generic constraint ]
2587 */
2588 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
2589 !cpu_online(cpu)))
2590 cpu = select_fallback_rq(task_cpu(p), p);
2591
2592 return cpu;
2593 }
2594
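/*
 * Simple exponentially weighted moving average: fold 1/8th of the
 * difference between the new sample and the old average into the average.
 */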
2595 static void update_avg(u64 *avg, u64 sample)
2596 {
2597 s64 diff = sample - *avg;
2598 *avg += diff >> 3;
2599 }
2600 #endif
2601
2602 static void
2603 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2604 {
2605 #ifdef CONFIG_SCHEDSTATS
2606 struct rq *rq = this_rq();
2607
2608 #ifdef CONFIG_SMP
2609 int this_cpu = smp_processor_id();
2610
2611 if (cpu == this_cpu) {
2612 schedstat_inc(rq, ttwu_local);
2613 schedstat_inc(p, se.statistics.nr_wakeups_local);
2614 } else {
2615 struct sched_domain *sd;
2616
2617 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2618 rcu_read_lock();
2619 for_each_domain(this_cpu, sd) {
2620 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2621 schedstat_inc(sd, ttwu_wake_remote);
2622 break;
2623 }
2624 }
2625 rcu_read_unlock();
2626 }
2627
2628 if (wake_flags & WF_MIGRATED)
2629 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2630
2631 #endif /* CONFIG_SMP */
2632
2633 schedstat_inc(rq, ttwu_count);
2634 schedstat_inc(p, se.statistics.nr_wakeups);
2635
2636 if (wake_flags & WF_SYNC)
2637 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2638
2639 #endif /* CONFIG_SCHEDSTATS */
2640 }
2641
2642 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
2643 {
2644 activate_task(rq, p, en_flags);
2645 p->on_rq = 1;
2646
2647 /* if a worker is waking up, notify workqueue */
2648 if (p->flags & PF_WQ_WORKER)
2649 wq_worker_waking_up(p, cpu_of(rq));
2650 }
2651
2652 /*
2653 * Mark the task runnable and perform wakeup-preemption.
2654 */
2655 static void
2656 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
2657 {
2658 trace_sched_wakeup(p, true);
2659 check_preempt_curr(rq, p, wake_flags);
2660
2661 p->state = TASK_RUNNING;
2662 #ifdef CONFIG_SMP
2663 if (p->sched_class->task_woken)
2664 p->sched_class->task_woken(rq, p);
2665
2666 if (rq->idle_stamp) {
2667 u64 delta = rq->clock - rq->idle_stamp;
2668 u64 max = 2*sysctl_sched_migration_cost;
2669
2670 if (delta > max)
2671 rq->avg_idle = max;
2672 else
2673 update_avg(&rq->avg_idle, delta);
2674 rq->idle_stamp = 0;
2675 }
2676 #endif
2677 }
2678
2679 static void
2680 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
2681 {
2682 #ifdef CONFIG_SMP
2683 if (p->sched_contributes_to_load)
2684 rq->nr_uninterruptible--;
2685 #endif
2686
2687 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
2688 ttwu_do_wakeup(rq, p, wake_flags);
2689 }
2690
2691 /*
2692 * Called in case the task @p isn't fully descheduled from its runqueue;
2693 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
2694 * since all we need to do is flip p->state to TASK_RUNNING, as
2695 * the task is still ->on_rq.
2696 */
2697 static int ttwu_remote(struct task_struct *p, int wake_flags)
2698 {
2699 struct rq *rq;
2700 int ret = 0;
2701
2702 rq = __task_rq_lock(p);
2703 if (p->on_rq) {
2704 ttwu_do_wakeup(rq, p, wake_flags);
2705 ret = 1;
2706 }
2707 __task_rq_unlock(rq);
2708
2709 return ret;
2710 }
2711
2712 #ifdef CONFIG_SMP
2713 static void sched_ttwu_pending(void)
2714 {
2715 struct rq *rq = this_rq();
2716 struct llist_node *llist = llist_del_all(&rq->wake_list);
2717 struct task_struct *p;
2718
2719 raw_spin_lock(&rq->lock);
2720
2721 while (llist) {
2722 p = llist_entry(llist, struct task_struct, wake_entry);
2723 llist = llist_next(llist);
2724 ttwu_do_activate(rq, p, 0);
2725 }
2726
2727 raw_spin_unlock(&rq->lock);
2728 }
2729
2730 void scheduler_ipi(void)
2731 {
2732 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
2733 return;
2734
2735 /*
2736 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
2737 * traditionally all their work was done from the interrupt return
2738 * path. Now that we actually do some work, we need to make sure
2739 * we do call them.
2740 *
2741 * Some archs already do call them, luckily irq_enter/exit nest
2742 * properly.
2743 *
2744 * Arguably we should visit all archs and update all handlers,
2745 * however a fair share of IPIs are still resched only, so this would
2746 * somewhat pessimize the simple resched case.
2747 */
2748 irq_enter();
2749 sched_ttwu_pending();
2750
2751 /*
2752 * Check if someone kicked us for doing the nohz idle load balance.
2753 */
2754 if (unlikely(got_nohz_idle_kick() && !need_resched())) {
2755 this_rq()->idle_balance = 1;
2756 raise_softirq_irqoff(SCHED_SOFTIRQ);
2757 }
2758 irq_exit();
2759 }
2760
2761 static void ttwu_queue_remote(struct task_struct *p, int cpu)
2762 {
2763 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
2764 smp_send_reschedule(cpu);
2765 }
2766
2767 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2768 static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
2769 {
2770 struct rq *rq;
2771 int ret = 0;
2772
2773 rq = __task_rq_lock(p);
2774 if (p->on_cpu) {
2775 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2776 ttwu_do_wakeup(rq, p, wake_flags);
2777 ret = 1;
2778 }
2779 __task_rq_unlock(rq);
2780
2781 return ret;
2782
2783 }
2784 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2785 #endif /* CONFIG_SMP */
2786
2787 static void ttwu_queue(struct task_struct *p, int cpu)
2788 {
2789 struct rq *rq = cpu_rq(cpu);
2790
2791 #if defined(CONFIG_SMP)
2792 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
2793 sched_clock_cpu(cpu); /* sync clocks x-cpu */
2794 ttwu_queue_remote(p, cpu);
2795 return;
2796 }
2797 #endif
2798
2799 raw_spin_lock(&rq->lock);
2800 ttwu_do_activate(rq, p, 0);
2801 raw_spin_unlock(&rq->lock);
2802 }
2803
2804 /**
2805 * try_to_wake_up - wake up a thread
2806 * @p: the thread to be awakened
2807 * @state: the mask of task states that can be woken
2808 * @wake_flags: wake modifier flags (WF_*)
2809 *
2810 * Put it on the run-queue if it's not already there. The "current"
2811 * thread is always on the run-queue (except when the actual
2812 * re-schedule is in progress), and as such you're allowed to do
2813 * the simpler "current->state = TASK_RUNNING" to mark yourself
2814 * runnable without the overhead of this.
2815 *
2816 * Returns %true if @p was woken up, %false if it was already running
2817 * or @state didn't match @p's state.
2818 */
2819 static int
2820 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2821 {
2822 unsigned long flags;
2823 int cpu, success = 0;
2824
2825 smp_wmb();
2826 raw_spin_lock_irqsave(&p->pi_lock, flags);
2827 if (!(p->state & state))
2828 goto out;
2829
2830 success = 1; /* we're going to change ->state */
2831 cpu = task_cpu(p);
2832
2833 if (p->on_rq && ttwu_remote(p, wake_flags))
2834 goto stat;
2835
2836 #ifdef CONFIG_SMP
2837 /*
2838 * If the owning (remote) cpu is still in the middle of schedule() with
2839 * this task as prev, wait until it's done referencing the task.
2840 */
2841 while (p->on_cpu) {
2842 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2843 /*
2844 * In case the architecture enables interrupts in
2845 * context_switch(), we cannot busy wait, since that
2846 * would lead to deadlocks when an interrupt hits and
2847 * tries to wake up @prev. So bail and do a complete
2848 * remote wakeup.
2849 */
2850 if (ttwu_activate_remote(p, wake_flags))
2851 goto stat;
2852 #else
2853 cpu_relax();
2854 #endif
2855 }
2856 /*
2857 * Pairs with the smp_wmb() in finish_lock_switch().
2858 */
2859 smp_rmb();
2860
2861 p->sched_contributes_to_load = !!task_contributes_to_load(p);
2862 p->state = TASK_WAKING;
2863
2864 if (p->sched_class->task_waking)
2865 p->sched_class->task_waking(p);
2866
2867 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2868 if (task_cpu(p) != cpu) {
2869 wake_flags |= WF_MIGRATED;
2870 set_task_cpu(p, cpu);
2871 }
2872 #endif /* CONFIG_SMP */
2873
2874 ttwu_queue(p, cpu);
2875 stat:
2876 ttwu_stat(p, cpu, wake_flags);
2877 out:
2878 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2879
2880 return success;
2881 }
2882
2883 /**
2884 * try_to_wake_up_local - try to wake up a local task with rq lock held
2885 * @p: the thread to be awakened
2886 *
2887 * Put @p on the run-queue if it's not already there. The caller must
2888 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2889 * the current task.
2890 */
2891 static void try_to_wake_up_local(struct task_struct *p)
2892 {
2893 struct rq *rq = task_rq(p);
2894
2895 BUG_ON(rq != this_rq());
2896 BUG_ON(p == current);
2897 lockdep_assert_held(&rq->lock);
2898
2899 if (!raw_spin_trylock(&p->pi_lock)) {
2900 raw_spin_unlock(&rq->lock);
2901 raw_spin_lock(&p->pi_lock);
2902 raw_spin_lock(&rq->lock);
2903 }
2904
2905 if (!(p->state & TASK_NORMAL))
2906 goto out;
2907
2908 if (!p->on_rq)
2909 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2910
2911 ttwu_do_wakeup(rq, p, 0);
2912 ttwu_stat(p, smp_processor_id(), 0);
2913 out:
2914 raw_spin_unlock(&p->pi_lock);
2915 }
2916
2917 /**
2918 * wake_up_process - Wake up a specific process
2919 * @p: The process to be woken up.
2920 *
2921 * Attempt to wake up the nominated process and move it to the set of runnable
2922 * processes. Returns 1 if the process was woken up, 0 if it was already
2923 * running.
2924 *
2925 * It may be assumed that this function implies a write memory barrier before
2926 * changing the task state if and only if any tasks are woken up.
2927 */
2928 int wake_up_process(struct task_struct *p)
2929 {
2930 return try_to_wake_up(p, TASK_ALL, 0);
2931 }
2932 EXPORT_SYMBOL(wake_up_process);
2933
2934 int wake_up_state(struct task_struct *p, unsigned int state)
2935 {
2936 return try_to_wake_up(p, state, 0);
2937 }
2938
2939 /*
2940 * Perform scheduler related setup for a newly forked process p.
2941 * p is forked by current.
2942 *
2943 * __sched_fork() is basic setup used by init_idle() too:
2944 */
2945 static void __sched_fork(struct task_struct *p)
2946 {
2947 p->on_rq = 0;
2948
2949 p->se.on_rq = 0;
2950 p->se.exec_start = 0;
2951 p->se.sum_exec_runtime = 0;
2952 p->se.prev_sum_exec_runtime = 0;
2953 p->se.nr_migrations = 0;
2954 p->se.vruntime = 0;
2955 INIT_LIST_HEAD(&p->se.group_node);
2956
2957 #ifdef CONFIG_SCHEDSTATS
2958 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2959 #endif
2960
2961 INIT_LIST_HEAD(&p->rt.run_list);
2962
2963 #ifdef CONFIG_PREEMPT_NOTIFIERS
2964 INIT_HLIST_HEAD(&p->preempt_notifiers);
2965 #endif
2966 }
2967
2968 /*
2969 * fork()/clone()-time setup:
2970 */
2971 void sched_fork(struct task_struct *p)
2972 {
2973 unsigned long flags;
2974 int cpu = get_cpu();
2975
2976 __sched_fork(p);
2977 /*
2978 * We mark the process as running here. This guarantees that
2979 * nobody will actually run it, and a signal or other external
2980 * event cannot wake it up and insert it on the runqueue either.
2981 */
2982 p->state = TASK_RUNNING;
2983
2984 /*
2985 * Make sure we do not leak PI boosting priority to the child.
2986 */
2987 p->prio = current->normal_prio;
2988
2989 /*
2990 * Revert to default priority/policy on fork if requested.
2991 */
2992 if (unlikely(p->sched_reset_on_fork)) {
2993 if (task_has_rt_policy(p)) {
2994 p->policy = SCHED_NORMAL;
2995 p->static_prio = NICE_TO_PRIO(0);
2996 p->rt_priority = 0;
2997 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2998 p->static_prio = NICE_TO_PRIO(0);
2999
3000 p->prio = p->normal_prio = __normal_prio(p);
3001 set_load_weight(p);
3002
3003 /*
3004 * We don't need the reset flag anymore after the fork. It has
3005 * fulfilled its duty:
3006 */
3007 p->sched_reset_on_fork = 0;
3008 }
3009
3010 if (!rt_prio(p->prio))
3011 p->sched_class = &fair_sched_class;
3012
3013 if (p->sched_class->task_fork)
3014 p->sched_class->task_fork(p);
3015
3016 /*
3017 * The child is not yet in the pid-hash so no cgroup attach races,
3018 * and the cgroup is pinned to this child because cgroup_fork()
3019 * is run before sched_fork().
3020 *
3021 * Silence PROVE_RCU.
3022 */
3023 raw_spin_lock_irqsave(&p->pi_lock, flags);
3024 set_task_cpu(p, cpu);
3025 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3026
3027 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
3028 if (likely(sched_info_on()))
3029 memset(&p->sched_info, 0, sizeof(p->sched_info));
3030 #endif
3031 #if defined(CONFIG_SMP)
3032 p->on_cpu = 0;
3033 #endif
3034 #ifdef CONFIG_PREEMPT_COUNT
3035 /* Want to start with kernel preemption disabled. */
3036 task_thread_info(p)->preempt_count = 1;
3037 #endif
3038 #ifdef CONFIG_SMP
3039 plist_node_init(&p->pushable_tasks, MAX_PRIO);
3040 #endif
3041
3042 put_cpu();
3043 }
3044
3045 /*
3046 * wake_up_new_task - wake up a newly created task for the first time.
3047 *
3048 * This function will do some initial scheduler statistics housekeeping
3049 * that must be done for every newly created context, then puts the task
3050 * on the runqueue and wakes it.
3051 */
3052 void wake_up_new_task(struct task_struct *p)
3053 {
3054 unsigned long flags;
3055 struct rq *rq;
3056
3057 raw_spin_lock_irqsave(&p->pi_lock, flags);
3058 #ifdef CONFIG_SMP
3059 /*
3060 * Fork balancing, do it here and not earlier because:
3061 * - cpus_allowed can change in the fork path
3062 * - any previously selected cpu might disappear through hotplug
3063 */
3064 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
3065 #endif
3066
3067 rq = __task_rq_lock(p);
3068 activate_task(rq, p, 0);
3069 p->on_rq = 1;
3070 trace_sched_wakeup_new(p, true);
3071 check_preempt_curr(rq, p, WF_FORK);
3072 #ifdef CONFIG_SMP
3073 if (p->sched_class->task_woken)
3074 p->sched_class->task_woken(rq, p);
3075 #endif
3076 task_rq_unlock(rq, p, &flags);
3077 }
3078
3079 #ifdef CONFIG_PREEMPT_NOTIFIERS
3080
3081 /**
3082 * preempt_notifier_register - tell me when current is being preempted & rescheduled
3083 * @notifier: notifier struct to register
3084 */
3085 void preempt_notifier_register(struct preempt_notifier *notifier)
3086 {
3087 hlist_add_head(&notifier->link, &current->preempt_notifiers);
3088 }
3089 EXPORT_SYMBOL_GPL(preempt_notifier_register);
3090
3091 /**
3092 * preempt_notifier_unregister - no longer interested in preemption notifications
3093 * @notifier: notifier struct to unregister
3094 *
3095 * This is safe to call from within a preemption notifier.
3096 */
3097 void preempt_notifier_unregister(struct preempt_notifier *notifier)
3098 {
3099 hlist_del(&notifier->link);
3100 }
3101 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
3102
3103 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3104 {
3105 struct preempt_notifier *notifier;
3106 struct hlist_node *node;
3107
3108 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
3109 notifier->ops->sched_in(notifier, raw_smp_processor_id());
3110 }
3111
3112 static void
3113 fire_sched_out_preempt_notifiers(struct task_struct *curr,
3114 struct task_struct *next)
3115 {
3116 struct preempt_notifier *notifier;
3117 struct hlist_node *node;
3118
3119 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
3120 notifier->ops->sched_out(notifier, next);
3121 }
3122
3123 #else /* !CONFIG_PREEMPT_NOTIFIERS */
3124
3125 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3126 {
3127 }
3128
3129 static void
3130 fire_sched_out_preempt_notifiers(struct task_struct *curr,
3131 struct task_struct *next)
3132 {
3133 }
3134
3135 #endif /* CONFIG_PREEMPT_NOTIFIERS */
3136
3137 /**
3138 * prepare_task_switch - prepare to switch tasks
3139 * @rq: the runqueue preparing to switch
3140 * @prev: the current task that is being switched out
3141 * @next: the task we are going to switch to.
3142 *
3143 * This is called with the rq lock held and interrupts off. It must
3144 * be paired with a subsequent finish_task_switch after the context
3145 * switch.
3146 *
3147 * prepare_task_switch sets up locking and calls architecture specific
3148 * hooks.
3149 */
3150 static inline void
3151 prepare_task_switch(struct rq *rq, struct task_struct *prev,
3152 struct task_struct *next)
3153 {
3154 sched_info_switch(prev, next);
3155 perf_event_task_sched_out(prev, next);
3156 fire_sched_out_preempt_notifiers(prev, next);
3157 prepare_lock_switch(rq, next);
3158 prepare_arch_switch(next);
3159 trace_sched_switch(prev, next);
3160 }
3161
3162 /**
3163 * finish_task_switch - clean up after a task-switch
3164 * @rq: runqueue associated with task-switch
3165 * @prev: the thread we just switched away from.
3166 *
3167 * finish_task_switch must be called after the context switch, paired
3168 * with a prepare_task_switch call before the context switch.
3169 * finish_task_switch will reconcile locking set up by prepare_task_switch,
3170 * and do any other architecture-specific cleanup actions.
3171 *
3172 * Note that we may have delayed dropping an mm in context_switch(). If
3173 * so, we finish that here outside of the runqueue lock. (Doing it
3174 * with the lock held can cause deadlocks; see schedule() for
3175 * details.)
3176 */
3177 static void finish_task_switch(struct rq *rq, struct task_struct *prev)
3178 __releases(rq->lock)
3179 {
3180 struct mm_struct *mm = rq->prev_mm;
3181 long prev_state;
3182
3183 rq->prev_mm = NULL;
3184
3185 /*
3186 * A task struct has one reference for the use as "current".
3187 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
3188 * schedule one last time. The schedule call will never return, and
3189 * the scheduled task must drop that reference.
3190 * The test for TASK_DEAD must occur while the runqueue locks are
3191 * still held, otherwise prev could be scheduled on another cpu, die
3192 * there before we look at prev->state, and then the reference would
3193 * be dropped twice.
3194 * Manfred Spraul <manfred@colorfullife.com>
3195 */
3196 prev_state = prev->state;
3197 finish_arch_switch(prev);
3198 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3199 local_irq_disable();
3200 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
3201 perf_event_task_sched_in(prev, current);
3202 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3203 local_irq_enable();
3204 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
3205 finish_lock_switch(rq, prev);
3206
3207 fire_sched_in_preempt_notifiers(current);
3208 if (mm)
3209 mmdrop(mm);
3210 if (unlikely(prev_state == TASK_DEAD)) {
3211 /*
3212 * Remove function-return probe instances associated with this
3213 * task and put them back on the free list.
3214 */
3215 kprobe_flush_task(prev);
3216 put_task_struct(prev);
3217 }
3218 }
3219
3220 #ifdef CONFIG_SMP
3221
3222 /* assumes rq->lock is held */
3223 static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
3224 {
3225 if (prev->sched_class->pre_schedule)
3226 prev->sched_class->pre_schedule(rq, prev);
3227 }
3228
3229 /* rq->lock is NOT held, but preemption is disabled */
3230 static inline void post_schedule(struct rq *rq)
3231 {
3232 if (rq->post_schedule) {
3233 unsigned long flags;
3234
3235 raw_spin_lock_irqsave(&rq->lock, flags);
3236 if (rq->curr->sched_class->post_schedule)
3237 rq->curr->sched_class->post_schedule(rq);
3238 raw_spin_unlock_irqrestore(&rq->lock, flags);
3239
3240 rq->post_schedule = 0;
3241 }
3242 }
3243
3244 #else
3245
3246 static inline void pre_schedule(struct rq *rq, struct task_struct *p)
3247 {
3248 }
3249
3250 static inline void post_schedule(struct rq *rq)
3251 {
3252 }
3253
3254 #endif
3255
3256 /**
3257 * schedule_tail - first thing a freshly forked thread must call.
3258 * @prev: the thread we just switched away from.
3259 */
3260 asmlinkage void schedule_tail(struct task_struct *prev)
3261 __releases(rq->lock)
3262 {
3263 struct rq *rq = this_rq();
3264
3265 finish_task_switch(rq, prev);
3266
3267 /*
3268 * FIXME: do we need to worry about rq being invalidated by the
3269 * task_switch?
3270 */
3271 post_schedule(rq);
3272
3273 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
3274 /* In this case, finish_task_switch does not reenable preemption */
3275 preempt_enable();
3276 #endif
3277 if (current->set_child_tid)
3278 put_user(task_pid_vnr(current), current->set_child_tid);
3279 }
3280
3281 /*
3282 * context_switch - switch to the new MM and the new
3283 * thread's register state.
3284 */
3285 static inline void
3286 context_switch(struct rq *rq, struct task_struct *prev,
3287 struct task_struct *next)
3288 {
3289 struct mm_struct *mm, *oldmm;
3290
3291 prepare_task_switch(rq, prev, next);
3292
3293 mm = next->mm;
3294 oldmm = prev->active_mm;
3295 /*
3296 * For paravirt, this is coupled with an exit in switch_to to
3297 * combine the page table reload and the switch backend into
3298 * one hypercall.
3299 */
3300 arch_start_context_switch(prev);
3301
3302 if (!mm) {
3303 next->active_mm = oldmm;
3304 atomic_inc(&oldmm->mm_count);
3305 enter_lazy_tlb(oldmm, next);
3306 } else
3307 switch_mm(oldmm, mm, next);
3308
3309 if (!prev->mm) {
3310 prev->active_mm = NULL;
3311 rq->prev_mm = oldmm;
3312 }
3313 /*
3314 * The runqueue lock will be released by the next
3315 * task (which is an invalid locking op, but in the case
3316 * of the scheduler it's an obvious special case), so we
3317 * do an early lockdep release here:
3318 */
3319 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
3320 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3321 #endif
3322
3323 /* Here we just switch the register state and the stack. */
3324 switch_to(prev, next, prev);
3325
3326 barrier();
3327 /*
3328 * this_rq must be evaluated again because prev may have moved
3329 * CPUs since it called schedule(), thus the 'rq' on its stack
3330 * frame will be invalid.
3331 */
3332 finish_task_switch(this_rq(), prev);
3333 }
3334
3335 /*
3336 * nr_running, nr_uninterruptible and nr_context_switches:
3337 *
3338 * externally visible scheduler statistics: current number of runnable
3339 * threads, current number of uninterruptible-sleeping threads, total
3340 * number of context switches performed since bootup.
3341 */
3342 unsigned long nr_running(void)
3343 {
3344 unsigned long i, sum = 0;
3345
3346 for_each_online_cpu(i)
3347 sum += cpu_rq(i)->nr_running;
3348
3349 return sum;
3350 }
3351
3352 unsigned long nr_uninterruptible(void)
3353 {
3354 unsigned long i, sum = 0;
3355
3356 for_each_possible_cpu(i)
3357 sum += cpu_rq(i)->nr_uninterruptible;
3358
3359 /*
3360 * Since we read the counters lockless, it might be slightly
3361 * inaccurate. Do not allow it to go below zero though:
3362 */
3363 if (unlikely((long)sum < 0))
3364 sum = 0;
3365
3366 return sum;
3367 }
3368
3369 unsigned long long nr_context_switches(void)
3370 {
3371 int i;
3372 unsigned long long sum = 0;
3373
3374 for_each_possible_cpu(i)
3375 sum += cpu_rq(i)->nr_switches;
3376
3377 return sum;
3378 }
3379
3380 unsigned long nr_iowait(void)
3381 {
3382 unsigned long i, sum = 0;
3383
3384 for_each_possible_cpu(i)
3385 sum += atomic_read(&cpu_rq(i)->nr_iowait);
3386
3387 return sum;
3388 }
3389
3390 unsigned long nr_iowait_cpu(int cpu)
3391 {
3392 struct rq *this = cpu_rq(cpu);
3393 return atomic_read(&this->nr_iowait);
3394 }
3395
3396 unsigned long this_cpu_load(void)
3397 {
3398 struct rq *this = this_rq();
3399 return this->cpu_load[0];
3400 }
3401
3402
3403 /* Variables and functions for calc_load */
3404 static atomic_long_t calc_load_tasks;
3405 static unsigned long calc_load_update;
3406 unsigned long avenrun[3];
3407 EXPORT_SYMBOL(avenrun);
3408
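/*
 * Return the change in this rq's contribution to the global load count
 * (nr_running + nr_uninterruptible) since the last fold, and remember the
 * new value in rq->calc_load_active.
 */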
3409 static long calc_load_fold_active(struct rq *this_rq)
3410 {
3411 long nr_active, delta = 0;
3412
3413 nr_active = this_rq->nr_running;
3414 nr_active += (long) this_rq->nr_uninterruptible;
3415
3416 if (nr_active != this_rq->calc_load_active) {
3417 delta = nr_active - this_rq->calc_load_active;
3418 this_rq->calc_load_active = nr_active;
3419 }
3420
3421 return delta;
3422 }
3423
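/*
 * Worked example for the one-minute average (FIXED_1 = 2048, EXP_1 = 1884
 * as defined in <linux/sched.h>): starting from load = 0 with one runnable
 * task (active = 1 * FIXED_1 = 2048), a single LOAD_FREQ step gives
 *   (0 * 1884 + 2048 * (2048 - 1884) + 1024) >> 11 = 164,
 * i.e. roughly 0.08, matching 1 - exp(-5 sec / 1 min).
 */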
3424 static unsigned long
3425 calc_load(unsigned long load, unsigned long exp, unsigned long active)
3426 {
3427 load *= exp;
3428 load += active * (FIXED_1 - exp);
3429 load += 1UL << (FSHIFT - 1);
3430 return load >> FSHIFT;
3431 }
3432
3433 #ifdef CONFIG_NO_HZ
3434 /*
3435 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3436 *
3437 * When making the ILB scale, we should try to pull this in as well.
3438 */
3439 static atomic_long_t calc_load_tasks_idle;
3440
3441 static void calc_load_account_idle(struct rq *this_rq)
3442 {
3443 long delta;
3444
3445 delta = calc_load_fold_active(this_rq);
3446 if (delta)
3447 atomic_long_add(delta, &calc_load_tasks_idle);
3448 }
3449
3450 static long calc_load_fold_idle(void)
3451 {
3452 long delta = 0;
3453
3454 /*
3455 * It's got a race; we don't care...
3456 */
3457 if (atomic_long_read(&calc_load_tasks_idle))
3458 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3459
3460 return delta;
3461 }
3462
3463 /**
3464 * fixed_power_int - compute: x^n, in O(log n) time
3465 *
3466 * @x: base of the power
3467 * @frac_bits: fractional bits of @x
3468 * @n: power to raise @x to.
3469 *
3470 * By exploiting the relation between the definition of the natural power
3471 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
3472 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3473 * (where: n_i \elem {0, 1}, the binary vector representing n),
3474 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3475 * of course trivially computable in O(log_2 n), the length of our binary
3476 * vector.
3477 */
3478 static unsigned long
3479 fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3480 {
3481 unsigned long result = 1UL << frac_bits;
3482
3483 if (n) for (;;) {
3484 if (n & 1) {
3485 result *= x;
3486 result += 1UL << (frac_bits - 1);
3487 result >>= frac_bits;
3488 }
3489 n >>= 1;
3490 if (!n)
3491 break;
3492 x *= x;
3493 x += 1UL << (frac_bits - 1);
3494 x >>= frac_bits;
3495 }
3496
3497 return result;
3498 }
3499
3500 /*
3501 * a1 = a0 * e + a * (1 - e)
3502 *
3503 * a2 = a1 * e + a * (1 - e)
3504 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3505 * = a0 * e^2 + a * (1 - e) * (1 + e)
3506 *
3507 * a3 = a2 * e + a * (1 - e)
3508 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3509 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3510 *
3511 * ...
3512 *
3513 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3514 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3515 * = a0 * e^n + a * (1 - e^n)
3516 *
3517 * [1] application of the geometric series:
3518 *
3519 * n 1 - x^(n+1)
3520 * S_n := \Sum x^i = -------------
3521 * i=0 1 - x
3522 */
3523 static unsigned long
3524 calc_load_n(unsigned long load, unsigned long exp,
3525 unsigned long active, unsigned int n)
3526 {
3527
3528 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3529 }
3530
3531 /*
3532 * NO_HZ can leave us missing all per-cpu ticks calling
3533 * calc_load_account_active(), but since an idle CPU folds its delta into
3534 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3535 * in the pending idle delta if our idle period crossed a load cycle boundary.
3536 *
3537 * Once we've updated the global active value, we need to apply the exponential
3538 * weights adjusted to the number of cycles missed.
3539 */
3540 static void calc_global_nohz(unsigned long ticks)
3541 {
3542 long delta, active, n;
3543
3544 if (time_before(jiffies, calc_load_update))
3545 return;
3546
3547 /*
3548 * If we crossed a calc_load_update boundary, make sure to fold
3549 * any pending idle changes, the respective CPUs might have
3550 * missed the tick driven calc_load_account_active() update
3551 * due to NO_HZ.
3552 */
3553 delta = calc_load_fold_idle();
3554 if (delta)
3555 atomic_long_add(delta, &calc_load_tasks);
3556
3557 /*
3558 * If we were idle for multiple load cycles, apply them.
3559 */
3560 if (ticks >= LOAD_FREQ) {
3561 n = ticks / LOAD_FREQ;
3562
3563 active = atomic_long_read(&calc_load_tasks);
3564 active = active > 0 ? active * FIXED_1 : 0;
3565
3566 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3567 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3568 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3569
3570 calc_load_update += n * LOAD_FREQ;
3571 }
3572
3573 /*
3574 * It's possible the remainder of the above division also crosses
3575 * a LOAD_FREQ period, the regular check in calc_global_load()
3576 * which comes after this will take care of that.
3577 *
3578 * Consider us being 11 ticks before a cycle completion, and us
3579 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3580 * age us 4 cycles, and the test in calc_global_load() will
3581 * pick up the final one.
3582 */
3583 }
3584 #else
3585 static void calc_load_account_idle(struct rq *this_rq)
3586 {
3587 }
3588
3589 static inline long calc_load_fold_idle(void)
3590 {
3591 return 0;
3592 }
3593
3594 static void calc_global_nohz(unsigned long ticks)
3595 {
3596 }
3597 #endif
3598
3599 /**
3600 * get_avenrun - get the load average array
3601 * @loads: pointer to dest load array
3602 * @offset: offset to add
3603 * @shift: shift count to shift the result left
3604 *
3605 * These values are estimates at best, so no need for locking.
3606 */
3607 void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3608 {
3609 loads[0] = (avenrun[0] + offset) << shift;
3610 loads[1] = (avenrun[1] + offset) << shift;
3611 loads[2] = (avenrun[2] + offset) << shift;
3612 }
3613
3614 /*
3615 * calc_global_load - update the avenrun load estimates 10 ticks after the
3616 * CPUs have updated calc_load_tasks.
3617 */
3618 void calc_global_load(unsigned long ticks)
3619 {
3620 long active;
3621
3622 calc_global_nohz(ticks);
3623
3624 if (time_before(jiffies, calc_load_update + 10))
3625 return;
3626
3627 active = atomic_long_read(&calc_load_tasks);
3628 active = active > 0 ? active * FIXED_1 : 0;
3629
3630 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3631 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3632 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3633
3634 calc_load_update += LOAD_FREQ;
3635 }
3636
3637 /*
3638 * Called from update_cpu_load() to periodically update this CPU's
3639 * active count.
3640 */
3641 static void calc_load_account_active(struct rq *this_rq)
3642 {
3643 long delta;
3644
3645 if (time_before(jiffies, this_rq->calc_load_update))
3646 return;
3647
3648 delta = calc_load_fold_active(this_rq);
3649 delta += calc_load_fold_idle();
3650 if (delta)
3651 atomic_long_add(delta, &calc_load_tasks);
3652
3653 this_rq->calc_load_update += LOAD_FREQ;
3654 }
3655
3656 /*
3657 * The exact cpuload at various idx values, calculated at every tick would be
3658 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3659 *
3660 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3661 * on the n-th tick when the cpu may be busy, then we have:
3662 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3663 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3664 *
3665 * decay_load_missed() below does efficient calculation of
3666 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3667 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3668 *
3669 * The calculation is approximated on a 128 point scale.
3670 * degrade_zero_ticks is the number of ticks after which load at any
3671 * particular idx is approximated to be zero.
3672 * degrade_factor is a precomputed table, a row for each load idx.
3673 * Each column corresponds to degradation factor for a power of two ticks,
3674 * based on 128 point scale.
3675 * Example:
3676 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3677 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3678 *
3679 * With this power of 2 load factors, we can degrade the load n times
3680 * by looking at 1 bits in n and doing as many mult/shift instead of
3681 * n mult/shifts needed by the exact degradation.
3682 */
3683 #define DEGRADE_SHIFT 7
3684 static const unsigned char
3685 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3686 static const unsigned char
3687 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3688 {0, 0, 0, 0, 0, 0, 0, 0},
3689 {64, 32, 8, 0, 0, 0, 0, 0},
3690 {96, 72, 40, 12, 1, 0, 0},
3691 {112, 98, 75, 43, 15, 1, 0},
3692 {120, 112, 98, 76, 45, 16, 2} };
3693
3694 /*
3695 * Update cpu_load for any missed ticks due to tickless idle. The backlog
3696 * is from when the CPU was idle, so we just decay the old load without
3697 * adding any new load.
3698 */
3699 static unsigned long
3700 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3701 {
3702 int j = 0;
3703
3704 if (!missed_updates)
3705 return load;
3706
3707 if (missed_updates >= degrade_zero_ticks[idx])
3708 return 0;
3709
3710 if (idx == 1)
3711 return load >> missed_updates;
3712
3713 while (missed_updates) {
3714 if (missed_updates % 2)
3715 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3716
3717 missed_updates >>= 1;
3718 j++;
3719 }
3720 return load;
3721 }
3722
3723 /*
3724 * Update rq->cpu_load[] statistics. This function is usually called every
3725 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3726 * every tick. We fix it up based on jiffies.
3727 */
3728 static void update_cpu_load(struct rq *this_rq)
3729 {
3730 unsigned long this_load = this_rq->load.weight;
3731 unsigned long curr_jiffies = jiffies;
3732 unsigned long pending_updates;
3733 int i, scale;
3734
3735 this_rq->nr_load_updates++;
3736
3737 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3738 if (curr_jiffies == this_rq->last_load_update_tick)
3739 return;
3740
3741 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3742 this_rq->last_load_update_tick = curr_jiffies;
3743
3744 /* Update our load: */
3745 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3746 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
3747 unsigned long old_load, new_load;
3748
3749 /* scale is effectively 1 << i now, and >> i divides by scale */
3750
3751 old_load = this_rq->cpu_load[i];
3752 old_load = decay_load_missed(old_load, pending_updates - 1, i);
3753 new_load = this_load;
3754 /*
3755 * Round up the averaging division if load is increasing. This
3756 * prevents us from getting stuck on 9 if the load is 10, for
3757 * example.
3758 */
3759 if (new_load > old_load)
3760 new_load += scale - 1;
3761
3762 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
3763 }
3764
3765 sched_avg_update(this_rq);
3766 }
3767
3768 static void update_cpu_load_active(struct rq *this_rq)
3769 {
3770 update_cpu_load(this_rq);
3771
3772 calc_load_account_active(this_rq);
3773 }
3774
3775 #ifdef CONFIG_SMP
3776
3777 /*
3778 * sched_exec - execve() is a valuable balancing opportunity, because at
3779 * this point the task has the smallest effective memory and cache footprint.
3780 */
3781 void sched_exec(void)
3782 {
3783 struct task_struct *p = current;
3784 unsigned long flags;
3785 int dest_cpu;
3786
3787 raw_spin_lock_irqsave(&p->pi_lock, flags);
3788 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
3789 if (dest_cpu == smp_processor_id())
3790 goto unlock;
3791
3792 if (likely(cpu_active(dest_cpu))) {
3793 struct migration_arg arg = { p, dest_cpu };
3794
3795 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3796 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
3797 return;
3798 }
3799 unlock:
3800 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3801 }
3802
3803 #endif
3804
3805 DEFINE_PER_CPU(struct kernel_stat, kstat);
3806
3807 EXPORT_PER_CPU_SYMBOL(kstat);
3808
3809 /*
3810 * Return any ns on the sched_clock that have not yet been accounted in
3811 * @p in case that task is currently running.
3812 *
3813 * Called with task_rq_lock() held on @rq.
3814 */
3815 static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3816 {
3817 u64 ns = 0;
3818
3819 if (task_current(rq, p)) {
3820 update_rq_clock(rq);
3821 ns = rq->clock_task - p->se.exec_start;
3822 if ((s64)ns < 0)
3823 ns = 0;
3824 }
3825
3826 return ns;
3827 }
3828
3829 unsigned long long task_delta_exec(struct task_struct *p)
3830 {
3831 unsigned long flags;
3832 struct rq *rq;
3833 u64 ns = 0;
3834
3835 rq = task_rq_lock(p, &flags);
3836 ns = do_task_delta_exec(p, rq);
3837 task_rq_unlock(rq, p, &flags);
3838
3839 return ns;
3840 }
3841
3842 /*
3843 * Return accounted runtime for the task.
3844 * In case the task is currently running, return the runtime plus current's
3845 * pending runtime that has not been accounted yet.
3846 */
3847 unsigned long long task_sched_runtime(struct task_struct *p)
3848 {
3849 unsigned long flags;
3850 struct rq *rq;
3851 u64 ns = 0;
3852
3853 rq = task_rq_lock(p, &flags);
3854 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3855 task_rq_unlock(rq, p, &flags);
3856
3857 return ns;
3858 }
3859
3860 /*
3861 * Account user cpu time to a process.
3862 * @p: the process that the cpu time gets accounted to
3863 * @cputime: the cpu time spent in user space since the last update
3864 * @cputime_scaled: cputime scaled by cpu frequency
3865 */
3866 void account_user_time(struct task_struct *p, cputime_t cputime,
3867 cputime_t cputime_scaled)
3868 {
3869 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3870 cputime64_t tmp;
3871
3872 /* Add user time to process. */
3873 p->utime = cputime_add(p->utime, cputime);
3874 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
3875 account_group_user_time(p, cputime);
3876
3877 /* Add user time to cpustat. */
3878 tmp = cputime_to_cputime64(cputime);
3879 if (TASK_NICE(p) > 0)
3880 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3881 else
3882 cpustat->user = cputime64_add(cpustat->user, tmp);
3883
3884 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
3885 /* Account for user time used */
3886 acct_update_integrals(p);
3887 }
3888
3889 /*
3890 * Account guest cpu time to a process.
3891 * @p: the process that the cpu time gets accounted to
3892 * @cputime: the cpu time spent in virtual machine since the last update
3893 * @cputime_scaled: cputime scaled by cpu frequency
3894 */
3895 static void account_guest_time(struct task_struct *p, cputime_t cputime,
3896 cputime_t cputime_scaled)
3897 {
3898 cputime64_t tmp;
3899 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3900
3901 tmp = cputime_to_cputime64(cputime);
3902
3903 /* Add guest time to process. */
3904 p->utime = cputime_add(p->utime, cputime);
3905 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
3906 account_group_user_time(p, cputime);
3907 p->gtime = cputime_add(p->gtime, cputime);
3908
3909 /* Add guest time to cpustat. */
3910 if (TASK_NICE(p) > 0) {
3911 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3912 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3913 } else {
3914 cpustat->user = cputime64_add(cpustat->user, tmp);
3915 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3916 }
3917 }
3918
3919 /*
3920 * Account system cpu time to a process and desired cpustat field
3921 * @p: the process that the cpu time gets accounted to
3922 * @cputime: the cpu time spent in kernel space since the last update
3923 * @cputime_scaled: cputime scaled by cpu frequency
3924 * @target_cputime64: pointer to cpustat field that has to be updated
3925 */
3926 static inline
3927 void __account_system_time(struct task_struct *p, cputime_t cputime,
3928 cputime_t cputime_scaled, cputime64_t *target_cputime64)
3929 {
3930 cputime64_t tmp = cputime_to_cputime64(cputime);
3931
3932 /* Add system time to process. */
3933 p->stime = cputime_add(p->stime, cputime);
3934 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3935 account_group_system_time(p, cputime);
3936
3937 /* Add system time to cpustat. */
3938 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
3939 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3940
3941 /* Account for system time used */
3942 acct_update_integrals(p);
3943 }
3944
3945 /*
3946 * Account system cpu time to a process.
3947 * @p: the process that the cpu time gets accounted to
3948 * @hardirq_offset: the offset to subtract from hardirq_count()
3949 * @cputime: the cpu time spent in kernel space since the last update
3950 * @cputime_scaled: cputime scaled by cpu frequency
3951 */
3952 void account_system_time(struct task_struct *p, int hardirq_offset,
3953 cputime_t cputime, cputime_t cputime_scaled)
3954 {
3955 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3956 cputime64_t *target_cputime64;
3957
3958 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
3959 account_guest_time(p, cputime, cputime_scaled);
3960 return;
3961 }
3962
3963 if (hardirq_count() - hardirq_offset)
3964 target_cputime64 = &cpustat->irq;
3965 else if (in_serving_softirq())
3966 target_cputime64 = &cpustat->softirq;
3967 else
3968 target_cputime64 = &cpustat->system;
3969
3970 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
3971 }
3972
3973 /*
3974 * Account for involuntary wait time.
3975 * @cputime: the cpu time spent in involuntary wait
3976 */
3977 void account_steal_time(cputime_t cputime)
3978 {
3979 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3980 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3981
3982 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
3983 }
3984
3985 /*
3986 * Account for idle time.
3987 * @cputime: the cpu time spent in idle wait
3988 */
3989 void account_idle_time(cputime_t cputime)
3990 {
3991 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3992 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3993 struct rq *rq = this_rq();
3994
3995 if (atomic_read(&rq->nr_iowait) > 0)
3996 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3997 else
3998 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
3999 }
4000
4001 static __always_inline bool steal_account_process_tick(void)
4002 {
4003 #ifdef CONFIG_PARAVIRT
4004 if (static_branch(&paravirt_steal_enabled)) {
4005 u64 steal, st = 0;
4006
4007 steal = paravirt_steal_clock(smp_processor_id());
4008 steal -= this_rq()->prev_steal_time;
4009
4010 st = steal_ticks(steal);
4011 this_rq()->prev_steal_time += st * TICK_NSEC;
4012
4013 account_steal_time(st);
4014 return st;
4015 }
4016 #endif
4017 return false;
4018 }
4019
4020 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
4021
4022 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
4023 /*
4024 * Account a tick to a process and cpustat
4025 * @p: the process that the cpu time gets accounted to
4026 * @user_tick: is the tick from userspace
4027 * @rq: the pointer to rq
4028 *
4029 * Tick demultiplexing follows the order
4030 * - pending hardirq update
4031 * - pending softirq update
4032 * - user_time
4033 * - idle_time
4034 * - system time
4035 * - check for guest_time
4036 * - else account as system_time
4037 *
4038 * The check for hardirq is done for both system and user time because there is
4039 * no timer going off while we are on hardirq, so we might otherwise never get an
4040 * opportunity to update it solely in system time.
4041 * p->stime and friends are only updated on system time and not on irq/
4042 * softirq time, as those no longer count toward the task's exec_runtime.
4043 */
4044 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
4045 struct rq *rq)
4046 {
4047 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
4048 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
4049 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4050
4051 if (steal_account_process_tick())
4052 return;
4053
4054 if (irqtime_account_hi_update()) {
4055 cpustat->irq = cputime64_add(cpustat->irq, tmp);
4056 } else if (irqtime_account_si_update()) {
4057 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
4058 } else if (this_cpu_ksoftirqd() == p) {
4059 /*
4060 * ksoftirqd time does not get accounted in cpu_softirq_time.
4061 * So, we have to handle it separately here.
4062 * Also, p->stime needs to be updated for ksoftirqd.
4063 */
4064 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
4065 &cpustat->softirq);
4066 } else if (user_tick) {
4067 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
4068 } else if (p == rq->idle) {
4069 account_idle_time(cputime_one_jiffy);
4070 } else if (p->flags & PF_VCPU) { /* System time or guest time */
4071 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
4072 } else {
4073 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
4074 &cpustat->system);
4075 }
4076 }
4077
4078 static void irqtime_account_idle_ticks(int ticks)
4079 {
4080 int i;
4081 struct rq *rq = this_rq();
4082
4083 for (i = 0; i < ticks; i++)
4084 irqtime_account_process_tick(current, 0, rq);
4085 }
4086 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
4087 static void irqtime_account_idle_ticks(int ticks) {}
4088 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
4089 struct rq *rq) {}
4090 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
4091
4092 /*
4093 * Account a single tick of cpu time.
4094 * @p: the process that the cpu time gets accounted to
4095 * @user_tick: indicates if the tick is a user or a system tick
4096 */
4097 void account_process_tick(struct task_struct *p, int user_tick)
4098 {
4099 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
4100 struct rq *rq = this_rq();
4101
4102 if (sched_clock_irqtime) {
4103 irqtime_account_process_tick(p, user_tick, rq);
4104 return;
4105 }
4106
4107 if (steal_account_process_tick())
4108 return;
4109
4110 if (user_tick)
4111 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
4112 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
4113 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
4114 one_jiffy_scaled);
4115 else
4116 account_idle_time(cputime_one_jiffy);
4117 }
4118
4119 /*
4120 * Account multiple ticks of steal time.
4122 * @ticks: number of stolen ticks
4123 */
4124 void account_steal_ticks(unsigned long ticks)
4125 {
4126 account_steal_time(jiffies_to_cputime(ticks));
4127 }
4128
4129 /*
4130 * Account multiple ticks of idle time.
4131 * @ticks: number of idle ticks
4132 */
4133 void account_idle_ticks(unsigned long ticks)
4134 {
4135
4136 if (sched_clock_irqtime) {
4137 irqtime_account_idle_ticks(ticks);
4138 return;
4139 }
4140
4141 account_idle_time(jiffies_to_cputime(ticks));
4142 }
4143
4144 #endif
4145
4146 /*
4147 * Use precise platform statistics if available:
4148 */
4149 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
4150 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4151 {
4152 *ut = p->utime;
4153 *st = p->stime;
4154 }
4155
4156 void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4157 {
4158 struct task_cputime cputime;
4159
4160 thread_group_cputime(p, &cputime);
4161
4162 *ut = cputime.utime;
4163 *st = cputime.stime;
4164 }
4165 #else
4166
4167 #ifndef nsecs_to_cputime
4168 # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
4169 #endif
4170
4171 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4172 {
4173 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
4174
4175 /*
4176 * Use CFS's precise accounting:
4177 */
4178 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
4179
4180 if (total) {
4181 u64 temp = rtime;
4182
4183 temp *= utime;
4184 do_div(temp, total);
4185 utime = (cputime_t)temp;
4186 } else
4187 utime = rtime;
4188
4189 /*
4190 * Compare with previous values, to keep monotonicity:
4191 */
4192 p->prev_utime = max(p->prev_utime, utime);
4193 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
4194
4195 *ut = p->prev_utime;
4196 *st = p->prev_stime;
4197 }
4198
4199 /*
4200 * Must be called with siglock held.
4201 */
4202 void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4203 {
4204 struct signal_struct *sig = p->signal;
4205 struct task_cputime cputime;
4206 cputime_t rtime, utime, total;
4207
4208 thread_group_cputime(p, &cputime);
4209
4210 total = cputime_add(cputime.utime, cputime.stime);
4211 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
4212
4213 if (total) {
4214 u64 temp = rtime;
4215
4216 temp *= cputime.utime;
4217 do_div(temp, total);
4218 utime = (cputime_t)temp;
4219 } else
4220 utime = rtime;
4221
4222 sig->prev_utime = max(sig->prev_utime, utime);
4223 sig->prev_stime = max(sig->prev_stime,
4224 cputime_sub(rtime, sig->prev_utime));
4225
4226 *ut = sig->prev_utime;
4227 *st = sig->prev_stime;
4228 }
4229 #endif
4230
4231 /*
4232 * This function gets called by the timer code, with HZ frequency.
4233 * We call it with interrupts disabled.
4234 */
4235 void scheduler_tick(void)
4236 {
4237 int cpu = smp_processor_id();
4238 struct rq *rq = cpu_rq(cpu);
4239 struct task_struct *curr = rq->curr;
4240
4241 sched_clock_tick();
4242
4243 raw_spin_lock(&rq->lock);
4244 update_rq_clock(rq);
4245 update_cpu_load_active(rq);
4246 curr->sched_class->task_tick(rq, curr, 0);
4247 raw_spin_unlock(&rq->lock);
4248
4249 perf_event_task_tick();
4250
4251 #ifdef CONFIG_SMP
4252 rq->idle_balance = idle_cpu(cpu);
4253 trigger_load_balance(rq, cpu);
4254 #endif
4255 }
4256
4257 notrace unsigned long get_parent_ip(unsigned long addr)
4258 {
4259 if (in_lock_functions(addr)) {
4260 addr = CALLER_ADDR2;
4261 if (in_lock_functions(addr))
4262 addr = CALLER_ADDR3;
4263 }
4264 return addr;
4265 }
4266
4267 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
4268 defined(CONFIG_PREEMPT_TRACER))
4269
4270 void __kprobes add_preempt_count(int val)
4271 {
4272 #ifdef CONFIG_DEBUG_PREEMPT
4273 /*
4274 * Underflow?
4275 */
4276 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4277 return;
4278 #endif
4279 preempt_count() += val;
4280 #ifdef CONFIG_DEBUG_PREEMPT
4281 /*
4282 * Spinlock count overflowing soon?
4283 */
4284 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4285 PREEMPT_MASK - 10);
4286 #endif
4287 if (preempt_count() == val)
4288 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
4289 }
4290 EXPORT_SYMBOL(add_preempt_count);
4291
4292 void __kprobes sub_preempt_count(int val)
4293 {
4294 #ifdef CONFIG_DEBUG_PREEMPT
4295 /*
4296 * Underflow?
4297 */
4298 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
4299 return;
4300 /*
4301 * Is the spinlock portion underflowing?
4302 */
4303 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4304 !(preempt_count() & PREEMPT_MASK)))
4305 return;
4306 #endif
4307
4308 if (preempt_count() == val)
4309 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
4310 preempt_count() -= val;
4311 }
4312 EXPORT_SYMBOL(sub_preempt_count);
4313
4314 #endif
4315
4316 /*
4317 * Print scheduling while atomic bug:
4318 */
4319 static noinline void __schedule_bug(struct task_struct *prev)
4320 {
4321 struct pt_regs *regs = get_irq_regs();
4322
4323 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4324 prev->comm, prev->pid, preempt_count());
4325
4326 debug_show_held_locks(prev);
4327 print_modules();
4328 if (irqs_disabled())
4329 print_irqtrace_events(prev);
4330
4331 if (regs)
4332 show_regs(regs);
4333 else
4334 dump_stack();
4335 }
4336
4337 /*
4338 * Various schedule()-time debugging checks and statistics:
4339 */
4340 static inline void schedule_debug(struct task_struct *prev)
4341 {
4342 /*
4343 * Test if we are atomic. Since do_exit() needs to call into
4344 * schedule() atomically, we ignore that path for now.
4345 * Otherwise, whine if we are scheduling when we should not be.
4346 */
4347 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
4348 __schedule_bug(prev);
4349 rcu_sleep_check();
4350
4351 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4352
4353 schedstat_inc(this_rq(), sched_count);
4354 }
4355
4356 static void put_prev_task(struct rq *rq, struct task_struct *prev)
4357 {
4358 if (prev->on_rq || rq->skip_clock_update < 0)
4359 update_rq_clock(rq);
4360 prev->sched_class->put_prev_task(rq, prev);
4361 }
4362
4363 /*
4364 * Pick up the highest-prio task:
4365 */
4366 static inline struct task_struct *
4367 pick_next_task(struct rq *rq)
4368 {
4369 const struct sched_class *class;
4370 struct task_struct *p;
4371
4372 /*
4373 * Optimization: we know that if all tasks are in
4374 * the fair class we can call that function directly:
4375 */
4376 if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
4377 p = fair_sched_class.pick_next_task(rq);
4378 if (likely(p))
4379 return p;
4380 }
4381
4382 for_each_class(class) {
4383 p = class->pick_next_task(rq);
4384 if (p)
4385 return p;
4386 }
4387
4388 BUG(); /* the idle class will always have a runnable task */
4389 }
4390
4391 /*
4392 * __schedule() is the main scheduler function.
4393 */
4394 static void __sched __schedule(void)
4395 {
4396 struct task_struct *prev, *next;
4397 unsigned long *switch_count;
4398 struct rq *rq;
4399 int cpu;
4400
4401 need_resched:
4402 preempt_disable();
4403 cpu = smp_processor_id();
4404 rq = cpu_rq(cpu);
4405 rcu_note_context_switch(cpu);
4406 prev = rq->curr;
4407
4408 schedule_debug(prev);
4409
4410 if (sched_feat(HRTICK))
4411 hrtick_clear(rq);
4412
4413 raw_spin_lock_irq(&rq->lock);
4414
4415 switch_count = &prev->nivcsw;
4416 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
4417 if (unlikely(signal_pending_state(prev->state, prev))) {
4418 prev->state = TASK_RUNNING;
4419 } else {
4420 deactivate_task(rq, prev, DEQUEUE_SLEEP);
4421 prev->on_rq = 0;
4422
4423 /*
4424 * If a worker went to sleep, notify and ask workqueue
4425 * whether it wants to wake up a task to maintain
4426 * concurrency.
4427 */
4428 if (prev->flags & PF_WQ_WORKER) {
4429 struct task_struct *to_wakeup;
4430
4431 to_wakeup = wq_worker_sleeping(prev, cpu);
4432 if (to_wakeup)
4433 try_to_wake_up_local(to_wakeup);
4434 }
4435 }
4436 switch_count = &prev->nvcsw;
4437 }
4438
4439 pre_schedule(rq, prev);
4440
4441 if (unlikely(!rq->nr_running))
4442 idle_balance(cpu, rq);
4443
4444 put_prev_task(rq, prev);
4445 next = pick_next_task(rq);
4446 clear_tsk_need_resched(prev);
4447 rq->skip_clock_update = 0;
4448
4449 if (likely(prev != next)) {
4450 rq->nr_switches++;
4451 rq->curr = next;
4452 ++*switch_count;
4453
4454 context_switch(rq, prev, next); /* unlocks the rq */
4455 /*
4456 * The context switch has flipped the stack from under us
4457 * and restored the local variables which were saved when
4458 * this task called schedule() in the past. prev == current
4459 * is still correct, but it can be moved to another cpu/rq.
4460 */
4461 cpu = smp_processor_id();
4462 rq = cpu_rq(cpu);
4463 } else
4464 raw_spin_unlock_irq(&rq->lock);
4465
4466 post_schedule(rq);
4467
4468 preempt_enable_no_resched();
4469 if (need_resched())
4470 goto need_resched;
4471 }
4472
4473 static inline void sched_submit_work(struct task_struct *tsk)
4474 {
4475 if (!tsk->state)
4476 return;
4477 /*
4478 * If we are going to sleep and we have plugged IO queued,
4479 * make sure to submit it to avoid deadlocks.
4480 */
4481 if (blk_needs_flush_plug(tsk))
4482 blk_schedule_flush_plug(tsk);
4483 }
4484
4485 asmlinkage void __sched schedule(void)
4486 {
4487 struct task_struct *tsk = current;
4488
4489 sched_submit_work(tsk);
4490 __schedule();
4491 }
4492 EXPORT_SYMBOL(schedule);
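
/*
 * Illustrative sketch, not from the original file: the usual pattern for a
 * task sleeping via schedule() is to publish its sleeping state first and
 * have another context wake it with wake_up_process(). Assumes
 * <linux/kthread.h>; my_worker_fn and my_work_pending are hypothetical.
 */
static int __maybe_unused my_worker_fn(void *data)
{
	bool *my_work_pending = data;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!*my_work_pending)
			schedule();			/* sleep until woken */
		__set_current_state(TASK_RUNNING);
		/* ... process the pending work ... */
	}
	return 0;
}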
4493
4494 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
4495
4496 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
4497 {
4498 if (lock->owner != owner)
4499 return false;
4500
4501 /*
4502 * Ensure we emit the owner->on_cpu dereference _after_ checking that
4503 * lock->owner still matches owner. If that check fails, owner might
4504 * point to free()d memory; if it still matches, the rcu_read_lock()
4505 * ensures the memory stays valid.
4506 */
4507 barrier();
4508
4509 return owner->on_cpu;
4510 }
4511
4512 /*
4513 * Look out! "owner" is an entirely speculative pointer
4514 * access and not reliable.
4515 */
4516 int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
4517 {
4518 if (!sched_feat(OWNER_SPIN))
4519 return 0;
4520
4521 rcu_read_lock();
4522 while (owner_running(lock, owner)) {
4523 if (need_resched())
4524 break;
4525
4526 arch_mutex_cpu_relax();
4527 }
4528 rcu_read_unlock();
4529
4530 /*
4531 * We break out the loop above on need_resched() and when the
4532 * owner changed, which is a sign for heavy contention. Return
4533 * success only when lock->owner is NULL.
4534 */
4535 return lock->owner == NULL;
4536 }
4537 #endif
4538
4539 #ifdef CONFIG_PREEMPT
4540 /*
4541 * This is the entry point to schedule() for in-kernel preemption
4542 * triggered by preempt_enable(). Preemption off the return-from-interrupt
4543 * path is handled separately by preempt_schedule_irq() below.
4544 */
4545 asmlinkage void __sched notrace preempt_schedule(void)
4546 {
4547 struct thread_info *ti = current_thread_info();
4548
4549 /*
4550 * If there is a non-zero preempt_count or interrupts are disabled,
4551 * we do not want to preempt the current task. Just return..
4552 */
4553 if (likely(ti->preempt_count || irqs_disabled()))
4554 return;
4555
4556 do {
4557 add_preempt_count_notrace(PREEMPT_ACTIVE);
4558 __schedule();
4559 sub_preempt_count_notrace(PREEMPT_ACTIVE);
4560
4561 /*
4562 * Check again in case we missed a preemption opportunity
4563 * between schedule and now.
4564 */
4565 barrier();
4566 } while (need_resched());
4567 }
4568 EXPORT_SYMBOL(preempt_schedule);
4569
4570 /*
4571 * This is the entry point to schedule() for kernel preemption
4572 * off of irq context.
4573 * Note that this is called and returns with irqs disabled. This
4574 * protects us against recursive calls from irq context.
4575 */
4576 asmlinkage void __sched preempt_schedule_irq(void)
4577 {
4578 struct thread_info *ti = current_thread_info();
4579
4580 /* Catch callers which need to be fixed */
4581 BUG_ON(ti->preempt_count || !irqs_disabled());
4582
4583 do {
4584 add_preempt_count(PREEMPT_ACTIVE);
4585 local_irq_enable();
4586 __schedule();
4587 local_irq_disable();
4588 sub_preempt_count(PREEMPT_ACTIVE);
4589
4590 /*
4591 * Check again in case we missed a preemption opportunity
4592 * between schedule and now.
4593 */
4594 barrier();
4595 } while (need_resched());
4596 }
4597
4598 #endif /* CONFIG_PREEMPT */
4599
4600 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
4601 void *key)
4602 {
4603 return try_to_wake_up(curr->private, mode, wake_flags);
4604 }
4605 EXPORT_SYMBOL(default_wake_function);
4606
4607 /*
4608 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4609 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
4610 * number) then we wake all the non-exclusive tasks and one exclusive task.
4611 *
4612 * There are circumstances in which we can try to wake a task which has already
4613 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
4614 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4615 */
4616 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4617 int nr_exclusive, int wake_flags, void *key)
4618 {
4619 wait_queue_t *curr, *next;
4620
4621 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
4622 unsigned flags = curr->flags;
4623
4624 if (curr->func(curr, mode, wake_flags, key) &&
4625 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
4626 break;
4627 }
4628 }
4629
4630 /**
4631 * __wake_up - wake up threads blocked on a waitqueue.
4632 * @q: the waitqueue
4633 * @mode: which threads
4634 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4635 * @key: is directly passed to the wakeup function
4636 *
4637 * It may be assumed that this function implies a write memory barrier before
4638 * changing the task state if and only if any tasks are woken up.
4639 */
4640 void __wake_up(wait_queue_head_t *q, unsigned int mode,
4641 int nr_exclusive, void *key)
4642 {
4643 unsigned long flags;
4644
4645 spin_lock_irqsave(&q->lock, flags);
4646 __wake_up_common(q, mode, nr_exclusive, 0, key);
4647 spin_unlock_irqrestore(&q->lock, flags);
4648 }
4649 EXPORT_SYMBOL(__wake_up);
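
/*
 * Illustrative sketch, not from the original file: __wake_up() is normally
 * reached through the wake_up*() macros in <linux/wait.h>, paired with
 * wait_event*() on the sleeping side. Assumes <linux/wait.h>; the waitqueue
 * and condition names below are hypothetical.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_example_wq);
static int my_example_cond;

static void __maybe_unused my_example_waiter(void)
{
	/* Sleeps uninterruptibly until my_example_cond becomes non-zero. */
	wait_event(my_example_wq, my_example_cond != 0);
}

static void __maybe_unused my_example_waker(void)
{
	my_example_cond = 1;
	wake_up(&my_example_wq);	/* ends up in __wake_up(q, TASK_NORMAL, 1, NULL) */
}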
4650
4651 /*
4652 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4653 */
4654 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4655 {
4656 __wake_up_common(q, mode, 1, 0, NULL);
4657 }
4658 EXPORT_SYMBOL_GPL(__wake_up_locked);
4659
4660 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4661 {
4662 __wake_up_common(q, mode, 1, 0, key);
4663 }
4664 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4665
4666 /**
4667 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
4668 * @q: the waitqueue
4669 * @mode: which threads
4670 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4671 * @key: opaque value to be passed to wakeup targets
4672 *
4673 * The sync wakeup differs in that the waker knows that it will schedule
4674 * away soon, so while the target thread will be woken up, it will not
4675 * be migrated to another CPU - ie. the two threads are 'synchronized'
4676 * with each other. This can prevent needless bouncing between CPUs.
4677 *
4678 * On UP it can prevent extra preemption.
4679 *
4680 * It may be assumed that this function implies a write memory barrier before
4681 * changing the task state if and only if any tasks are woken up.
4682 */
4683 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4684 int nr_exclusive, void *key)
4685 {
4686 unsigned long flags;
4687 int wake_flags = WF_SYNC;
4688
4689 if (unlikely(!q))
4690 return;
4691
4692 if (unlikely(!nr_exclusive))
4693 wake_flags = 0;
4694
4695 spin_lock_irqsave(&q->lock, flags);
4696 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
4697 spin_unlock_irqrestore(&q->lock, flags);
4698 }
4699 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4700
4701 /*
4702 * __wake_up_sync - see __wake_up_sync_key()
4703 */
4704 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4705 {
4706 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4707 }
4708 EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4709
4710 /**
4711 * complete: - signals a single thread waiting on this completion
4712 * @x: holds the state of this particular completion
4713 *
4714 * This will wake up a single thread waiting on this completion. Threads will be
4715 * awakened in the same order in which they were queued.
4716 *
4717 * See also complete_all(), wait_for_completion() and related routines.
4718 *
4719 * It may be assumed that this function implies a write memory barrier before
4720 * changing the task state if and only if any tasks are woken up.
4721 */
4722 void complete(struct completion *x)
4723 {
4724 unsigned long flags;
4725
4726 spin_lock_irqsave(&x->wait.lock, flags);
4727 x->done++;
4728 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
4729 spin_unlock_irqrestore(&x->wait.lock, flags);
4730 }
4731 EXPORT_SYMBOL(complete);
4732
4733 /**
4734 * complete_all: - signals all threads waiting on this completion
4735 * @x: holds the state of this particular completion
4736 *
4737 * This will wake up all threads waiting on this particular completion event.
4738 *
4739 * It may be assumed that this function implies a write memory barrier before
4740 * changing the task state if and only if any tasks are woken up.
4741 */
4742 void complete_all(struct completion *x)
4743 {
4744 unsigned long flags;
4745
4746 spin_lock_irqsave(&x->wait.lock, flags);
4747 x->done += UINT_MAX/2;
4748 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
4749 spin_unlock_irqrestore(&x->wait.lock, flags);
4750 }
4751 EXPORT_SYMBOL(complete_all);
4752
4753 static inline long __sched
4754 do_wait_for_common(struct completion *x, long timeout, int state)
4755 {
4756 if (!x->done) {
4757 DECLARE_WAITQUEUE(wait, current);
4758
4759 __add_wait_queue_tail_exclusive(&x->wait, &wait);
4760 do {
4761 if (signal_pending_state(state, current)) {
4762 timeout = -ERESTARTSYS;
4763 break;
4764 }
4765 __set_current_state(state);
4766 spin_unlock_irq(&x->wait.lock);
4767 timeout = schedule_timeout(timeout);
4768 spin_lock_irq(&x->wait.lock);
4769 } while (!x->done && timeout);
4770 __remove_wait_queue(&x->wait, &wait);
4771 if (!x->done)
4772 return timeout;
4773 }
4774 x->done--;
4775 return timeout ?: 1;
4776 }
4777
4778 static long __sched
4779 wait_for_common(struct completion *x, long timeout, int state)
4780 {
4781 might_sleep();
4782
4783 spin_lock_irq(&x->wait.lock);
4784 timeout = do_wait_for_common(x, timeout, state);
4785 spin_unlock_irq(&x->wait.lock);
4786 return timeout;
4787 }
4788
4789 /**
4790 * wait_for_completion: - waits for completion of a task
4791 * @x: holds the state of this particular completion
4792 *
4793 * This waits to be signaled for completion of a specific task. It is NOT
4794 * interruptible and there is no timeout.
4795 *
4796 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4797 * and interrupt capability. Also see complete().
4798 */
4799 void __sched wait_for_completion(struct completion *x)
4800 {
4801 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
4802 }
4803 EXPORT_SYMBOL(wait_for_completion);
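
/*
 * Illustrative sketch, not from the original file: a typical producer/
 * consumer use of a completion. Assumes <linux/completion.h>; my_setup_done
 * and the two functions are hypothetical.
 */
static DECLARE_COMPLETION(my_setup_done);

static void __maybe_unused my_setup_producer(void)
{
	/* ... finish initialization ... */
	complete(&my_setup_done);		/* wake exactly one waiter */
}

static void __maybe_unused my_setup_consumer(void)
{
	wait_for_completion(&my_setup_done);	/* blocks, uninterruptible */
	/* ... safe to use the initialized state here ... */
}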
4804
4805 /**
4806 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4807 * @x: holds the state of this particular completion
4808 * @timeout: timeout value in jiffies
4809 *
4810 * This waits for either a completion of a specific task to be signaled or for a
4811 * specified timeout to expire. The timeout is in jiffies. It is not
4812 * interruptible.
4813 */
4814 unsigned long __sched
4815 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4816 {
4817 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
4818 }
4819 EXPORT_SYMBOL(wait_for_completion_timeout);
4820
4821 /**
4822 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4823 * @x: holds the state of this particular completion
4824 *
4825 * This waits for completion of a specific task to be signaled. It is
4826 * interruptible.
4827 */
4828 int __sched wait_for_completion_interruptible(struct completion *x)
4829 {
4830 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4831 if (t == -ERESTARTSYS)
4832 return t;
4833 return 0;
4834 }
4835 EXPORT_SYMBOL(wait_for_completion_interruptible);
4836
4837 /**
4838 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4839 * @x: holds the state of this particular completion
4840 * @timeout: timeout value in jiffies
4841 *
4842 * This waits for either a completion of a specific task to be signaled or for a
4843 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4844 */
4845 long __sched
4846 wait_for_completion_interruptible_timeout(struct completion *x,
4847 unsigned long timeout)
4848 {
4849 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
4850 }
4851 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4852
4853 /**
4854 * wait_for_completion_killable: - waits for completion of a task (killable)
4855 * @x: holds the state of this particular completion
4856 *
4857 * This waits to be signaled for completion of a specific task. It can be
4858 * interrupted by a kill signal.
4859 */
4860 int __sched wait_for_completion_killable(struct completion *x)
4861 {
4862 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4863 if (t == -ERESTARTSYS)
4864 return t;
4865 return 0;
4866 }
4867 EXPORT_SYMBOL(wait_for_completion_killable);
4868
4869 /**
4870 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4871 * @x: holds the state of this particular completion
4872 * @timeout: timeout value in jiffies
4873 *
4874 * This waits for either a completion of a specific task to be
4875 * signaled or for a specified timeout to expire. It can be
4876 * interrupted by a kill signal. The timeout is in jiffies.
4877 */
4878 long __sched
4879 wait_for_completion_killable_timeout(struct completion *x,
4880 unsigned long timeout)
4881 {
4882 return wait_for_common(x, timeout, TASK_KILLABLE);
4883 }
4884 EXPORT_SYMBOL(wait_for_completion_killable_timeout);
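
/*
 * Illustrative sketch, not from the original file: the timeout/killable
 * variants report their outcome through the return value: 0 means the
 * timeout expired, -ERESTARTSYS means a fatal signal arrived, and a
 * positive value is the remaining time in jiffies. Assumes
 * <linux/jiffies.h>; my_done and the wrapper are hypothetical.
 */
static void __maybe_unused my_wait_with_timeout(struct completion *my_done)
{
	long ret;

	ret = wait_for_completion_killable_timeout(my_done,
						   msecs_to_jiffies(500));
	if (ret == 0)
		;	/* timed out, my_done was never completed */
	else if (ret < 0)
		;	/* interrupted by a fatal signal (-ERESTARTSYS) */
	else
		;	/* completed, with 'ret' jiffies to spare */
}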
4885
4886 /**
4887 * try_wait_for_completion - try to decrement a completion without blocking
4888 * @x: completion structure
4889 *
4890 * Returns: 0 if a decrement cannot be done without blocking
4891 * 1 if a decrement succeeded.
4892 *
4893 * If a completion is being used as a counting completion,
4894 * attempt to decrement the counter without blocking. This
4895 * enables us to avoid waiting if the resource the completion
4896 * is protecting is not available.
4897 */
4898 bool try_wait_for_completion(struct completion *x)
4899 {
4900 unsigned long flags;
4901 int ret = 1;
4902
4903 spin_lock_irqsave(&x->wait.lock, flags);
4904 if (!x->done)
4905 ret = 0;
4906 else
4907 x->done--;
4908 spin_unlock_irqrestore(&x->wait.lock, flags);
4909 return ret;
4910 }
4911 EXPORT_SYMBOL(try_wait_for_completion);
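
/*
 * Illustrative sketch, not from the original file: when a completion is used
 * as a counting resource, try the non-blocking decrement first and fall back
 * to sleeping only when nothing is available. my_resource_done and the
 * wrapper are hypothetical.
 */
static void __maybe_unused my_claim_resource(struct completion *my_resource_done)
{
	if (!try_wait_for_completion(my_resource_done)) {
		/* nothing available right now, wait for a producer */
		wait_for_completion(my_resource_done);
	}
	/* ... one unit of the resource is now ours ... */
}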
4912
4913 /**
4914 * completion_done - Test to see if a completion has any waiters
4915 * @x: completion structure
4916 *
4917 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4918 * 1 if there are no waiters.
4919 *
4920 */
4921 bool completion_done(struct completion *x)
4922 {
4923 unsigned long flags;
4924 int ret = 1;
4925
4926 spin_lock_irqsave(&x->wait.lock, flags);
4927 if (!x->done)
4928 ret = 0;
4929 spin_unlock_irqrestore(&x->wait.lock, flags);
4930 return ret;
4931 }
4932 EXPORT_SYMBOL(completion_done);
4933
4934 static long __sched
4935 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
4936 {
4937 unsigned long flags;
4938 wait_queue_t wait;
4939
4940 init_waitqueue_entry(&wait, current);
4941
4942 __set_current_state(state);
4943
4944 spin_lock_irqsave(&q->lock, flags);
4945 __add_wait_queue(q, &wait);
4946 spin_unlock(&q->lock);
4947 timeout = schedule_timeout(timeout);
4948 spin_lock_irq(&q->lock);
4949 __remove_wait_queue(q, &wait);
4950 spin_unlock_irqrestore(&q->lock, flags);
4951
4952 return timeout;
4953 }
4954
4955 void __sched interruptible_sleep_on(wait_queue_head_t *q)
4956 {
4957 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4958 }
4959 EXPORT_SYMBOL(interruptible_sleep_on);
4960
4961 long __sched
4962 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
4963 {
4964 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
4965 }
4966 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4967
4968 void __sched sleep_on(wait_queue_head_t *q)
4969 {
4970 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4971 }
4972 EXPORT_SYMBOL(sleep_on);
4973
4974 long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
4975 {
4976 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
4977 }
4978 EXPORT_SYMBOL(sleep_on_timeout);
4979
4980 #ifdef CONFIG_RT_MUTEXES
4981
4982 /*
4983 * rt_mutex_setprio - set the current priority of a task
4984 * @p: task
4985 * @prio: prio value (kernel-internal form)
4986 *
4987 * This function changes the 'effective' priority of a task. It does
4988 * not touch ->normal_prio like __setscheduler().
4989 *
4990 * Used by the rt_mutex code to implement priority inheritance logic.
4991 */
4992 void rt_mutex_setprio(struct task_struct *p, int prio)
4993 {
4994 int oldprio, on_rq, running;
4995 struct rq *rq;
4996 const struct sched_class *prev_class;
4997
4998 BUG_ON(prio < 0 || prio > MAX_PRIO);
4999
5000 rq = __task_rq_lock(p);
5001
5002 trace_sched_pi_setprio(p, prio);
5003 oldprio = p->prio;
5004 prev_class = p->sched_class;
5005 on_rq = p->on_rq;
5006 running = task_current(rq, p);
5007 if (on_rq)
5008 dequeue_task(rq, p, 0);
5009 if (running)
5010 p->sched_class->put_prev_task(rq, p);
5011
5012 if (rt_prio(prio))
5013 p->sched_class = &rt_sched_class;
5014 else
5015 p->sched_class = &fair_sched_class;
5016
5017 p->prio = prio;
5018
5019 if (running)
5020 p->sched_class->set_curr_task(rq);
5021 if (on_rq)
5022 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
5023
5024 check_class_changed(rq, p, prev_class, oldprio);
5025 __task_rq_unlock(rq);
5026 }
5027
5028 #endif
5029
5030 void set_user_nice(struct task_struct *p, long nice)
5031 {
5032 int old_prio, delta, on_rq;
5033 unsigned long flags;
5034 struct rq *rq;
5035
5036 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
5037 return;
5038 /*
5039 * We have to be careful, if called from sys_setpriority(),
5040 * the task might be in the middle of scheduling on another CPU.
5041 */
5042 rq = task_rq_lock(p, &flags);
5043 /*
5044 * The RT priorities are set via sched_setscheduler(), but we still
5045 * allow the 'normal' nice value to be set - but as expected
5046 * it won't have any effect on scheduling as long as the task
5047 * remains SCHED_FIFO/SCHED_RR:
5048 */
5049 if (task_has_rt_policy(p)) {
5050 p->static_prio = NICE_TO_PRIO(nice);
5051 goto out_unlock;
5052 }
5053 on_rq = p->on_rq;
5054 if (on_rq)
5055 dequeue_task(rq, p, 0);
5056
5057 p->static_prio = NICE_TO_PRIO(nice);
5058 set_load_weight(p);
5059 old_prio = p->prio;
5060 p->prio = effective_prio(p);
5061 delta = p->prio - old_prio;
5062
5063 if (on_rq) {
5064 enqueue_task(rq, p, 0);
5065 /*
5066 * If the task increased its priority or is running and
5067 * lowered its priority, then reschedule its CPU:
5068 */
5069 if (delta < 0 || (delta > 0 && task_running(rq, p)))
5070 resched_task(rq->curr);
5071 }
5072 out_unlock:
5073 task_rq_unlock(rq, p, &flags);
5074 }
5075 EXPORT_SYMBOL(set_user_nice);
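
/*
 * Illustrative sketch, not from the original file: in-kernel users typically
 * call set_user_nice() right after creating a kernel thread to make it more
 * or less favoured by the scheduler. Assumes <linux/kthread.h>; the thread
 * function and name are hypothetical.
 */
static void __maybe_unused my_spawn_background_thread(int (*my_thread_fn)(void *))
{
	struct task_struct *tsk;

	tsk = kthread_run(my_thread_fn, NULL, "my-background");
	if (!IS_ERR(tsk))
		set_user_nice(tsk, 10);	/* deprioritize: nice 10 */
}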
5076
5077 /*
5078 * can_nice - check if a task can reduce its nice value
5079 * @p: task
5080 * @nice: nice value
5081 */
5082 int can_nice(const struct task_struct *p, const int nice)
5083 {
5084 /* convert nice value [19,-20] to rlimit style value [1,40] */
5085 int nice_rlim = 20 - nice;
5086
5087 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
5088 capable(CAP_SYS_NICE));
5089 }
5090
5091 #ifdef __ARCH_WANT_SYS_NICE
5092
5093 /*
5094 * sys_nice - change the priority of the current process.
5095 * @increment: priority increment
5096 *
5097 * sys_setpriority is a more generic, but much slower function that
5098 * does similar things.
5099 */
5100 SYSCALL_DEFINE1(nice, int, increment)
5101 {
5102 long nice, retval;
5103
5104 /*
5105 * Setpriority might change our priority at the same moment.
5106 * We don't have to worry. Conceptually one call occurs first
5107 * and we have a single winner.
5108 */
5109 if (increment < -40)
5110 increment = -40;
5111 if (increment > 40)
5112 increment = 40;
5113
5114 nice = TASK_NICE(current) + increment;
5115 if (nice < -20)
5116 nice = -20;
5117 if (nice > 19)
5118 nice = 19;
5119
5120 if (increment < 0 && !can_nice(current, nice))
5121 return -EPERM;
5122
5123 retval = security_task_setnice(current, nice);
5124 if (retval)
5125 return retval;
5126
5127 set_user_nice(current, nice);
5128 return 0;
5129 }
5130
5131 #endif
5132
5133 /**
5134 * task_prio - return the priority value of a given task.
5135 * @p: the task in question.
5136 *
5137 * This is the priority value as seen by users in /proc.
5138 * RT tasks are offset by -200. Normal tasks are centered
5139 * around 0, value goes from -16 to +15.
5140 */
5141 int task_prio(const struct task_struct *p)
5142 {
5143 return p->prio - MAX_RT_PRIO;
5144 }
5145
5146 /**
5147 * task_nice - return the nice value of a given task.
5148 * @p: the task in question.
5149 */
5150 int task_nice(const struct task_struct *p)
5151 {
5152 return TASK_NICE(p);
5153 }
5154 EXPORT_SYMBOL(task_nice);
5155
5156 /**
5157 * idle_cpu - is a given cpu idle currently?
5158 * @cpu: the processor in question.
5159 */
5160 int idle_cpu(int cpu)
5161 {
5162 struct rq *rq = cpu_rq(cpu);
5163
5164 if (rq->curr != rq->idle)
5165 return 0;
5166
5167 if (rq->nr_running)
5168 return 0;
5169
5170 #ifdef CONFIG_SMP
5171 if (!llist_empty(&rq->wake_list))
5172 return 0;
5173 #endif
5174
5175 return 1;
5176 }
5177
5178 /**
5179 * idle_task - return the idle task for a given cpu.
5180 * @cpu: the processor in question.
5181 */
5182 struct task_struct *idle_task(int cpu)
5183 {
5184 return cpu_rq(cpu)->idle;
5185 }
5186
5187 /**
5188 * find_process_by_pid - find a process with a matching PID value.
5189 * @pid: the pid in question.
5190 */
5191 static struct task_struct *find_process_by_pid(pid_t pid)
5192 {
5193 return pid ? find_task_by_vpid(pid) : current;
5194 }
5195
5196 /* Actually do priority change: must hold rq lock. */
5197 static void
5198 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
5199 {
5200 p->policy = policy;
5201 p->rt_priority = prio;
5202 p->normal_prio = normal_prio(p);
5203 /* we are holding p->pi_lock already */
5204 p->prio = rt_mutex_getprio(p);
5205 if (rt_prio(p->prio))
5206 p->sched_class = &rt_sched_class;
5207 else
5208 p->sched_class = &fair_sched_class;
5209 set_load_weight(p);
5210 }
5211
5212 /*
5213 * check the target process has a UID that matches the current process's
5214 */
5215 static bool check_same_owner(struct task_struct *p)
5216 {
5217 const struct cred *cred = current_cred(), *pcred;
5218 bool match;
5219
5220 rcu_read_lock();
5221 pcred = __task_cred(p);
5222 if (cred->user->user_ns == pcred->user->user_ns)
5223 match = (cred->euid == pcred->euid ||
5224 cred->euid == pcred->uid);
5225 else
5226 match = false;
5227 rcu_read_unlock();
5228 return match;
5229 }
5230
5231 static int __sched_setscheduler(struct task_struct *p, int policy,
5232 const struct sched_param *param, bool user)
5233 {
5234 int retval, oldprio, oldpolicy = -1, on_rq, running;
5235 unsigned long flags;
5236 const struct sched_class *prev_class;
5237 struct rq *rq;
5238 int reset_on_fork;
5239
5240 /* may grab non-irq protected spin_locks */
5241 BUG_ON(in_interrupt());
5242 recheck:
5243 /* double check policy once rq lock held */
5244 if (policy < 0) {
5245 reset_on_fork = p->sched_reset_on_fork;
5246 policy = oldpolicy = p->policy;
5247 } else {
5248 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
5249 policy &= ~SCHED_RESET_ON_FORK;
5250
5251 if (policy != SCHED_FIFO && policy != SCHED_RR &&
5252 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
5253 policy != SCHED_IDLE)
5254 return -EINVAL;
5255 }
5256
5257 /*
5258 * Valid priorities for SCHED_FIFO and SCHED_RR are
5259 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
5260 * SCHED_BATCH and SCHED_IDLE is 0.
5261 */
5262 if (param->sched_priority < 0 ||
5263 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
5264 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
5265 return -EINVAL;
5266 if (rt_policy(policy) != (param->sched_priority != 0))
5267 return -EINVAL;
5268
5269 /*
5270 * Allow unprivileged RT tasks to decrease priority:
5271 */
5272 if (user && !capable(CAP_SYS_NICE)) {
5273 if (rt_policy(policy)) {
5274 unsigned long rlim_rtprio =
5275 task_rlimit(p, RLIMIT_RTPRIO);
5276
5277 /* can't set/change the rt policy */
5278 if (policy != p->policy && !rlim_rtprio)
5279 return -EPERM;
5280
5281 /* can't increase priority */
5282 if (param->sched_priority > p->rt_priority &&
5283 param->sched_priority > rlim_rtprio)
5284 return -EPERM;
5285 }
5286
5287 /*
5288 * Treat SCHED_IDLE as nice 20. Only allow a switch to
5289 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
5290 */
5291 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
5292 if (!can_nice(p, TASK_NICE(p)))
5293 return -EPERM;
5294 }
5295
5296 /* can't change other user's priorities */
5297 if (!check_same_owner(p))
5298 return -EPERM;
5299
5300 /* Normal users shall not reset the sched_reset_on_fork flag */
5301 if (p->sched_reset_on_fork && !reset_on_fork)
5302 return -EPERM;
5303 }
5304
5305 if (user) {
5306 retval = security_task_setscheduler(p);
5307 if (retval)
5308 return retval;
5309 }
5310
5311 /*
5312 * make sure no PI-waiters arrive (or leave) while we are
5313 * changing the priority of the task:
5314 *
5315 * To be able to change p->policy safely, the appropriate
5316 * runqueue lock must be held.
5317 */
5318 rq = task_rq_lock(p, &flags);
5319
5320 /*
5321 * Changing the policy of the stop threads is a very bad idea
5322 */
5323 if (p == rq->stop) {
5324 task_rq_unlock(rq, p, &flags);
5325 return -EINVAL;
5326 }
5327
5328 /*
5329 * If not changing anything there's no need to proceed further:
5330 */
5331 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
5332 param->sched_priority == p->rt_priority))) {
5333
5334 __task_rq_unlock(rq);
5335 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5336 return 0;
5337 }
5338
5339 #ifdef CONFIG_RT_GROUP_SCHED
5340 if (user) {
5341 /*
5342 * Do not allow realtime tasks into groups that have no runtime
5343 * assigned.
5344 */
5345 if (rt_bandwidth_enabled() && rt_policy(policy) &&
5346 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5347 !task_group_is_autogroup(task_group(p))) {
5348 task_rq_unlock(rq, p, &flags);
5349 return -EPERM;
5350 }
5351 }
5352 #endif
5353
5354 /* recheck policy now with rq lock held */
5355 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5356 policy = oldpolicy = -1;
5357 task_rq_unlock(rq, p, &flags);
5358 goto recheck;
5359 }
5360 on_rq = p->on_rq;
5361 running = task_current(rq, p);
5362 if (on_rq)
5363 deactivate_task(rq, p, 0);
5364 if (running)
5365 p->sched_class->put_prev_task(rq, p);
5366
5367 p->sched_reset_on_fork = reset_on_fork;
5368
5369 oldprio = p->prio;
5370 prev_class = p->sched_class;
5371 __setscheduler(rq, p, policy, param->sched_priority);
5372
5373 if (running)
5374 p->sched_class->set_curr_task(rq);
5375 if (on_rq)
5376 activate_task(rq, p, 0);
5377
5378 check_class_changed(rq, p, prev_class, oldprio);
5379 task_rq_unlock(rq, p, &flags);
5380
5381 rt_mutex_adjust_pi(p);
5382
5383 return 0;
5384 }
5385
5386 /**
5387 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5388 * @p: the task in question.
5389 * @policy: new policy.
5390 * @param: structure containing the new RT priority.
5391 *
5392 * NOTE that the task may be already dead.
5393 */
5394 int sched_setscheduler(struct task_struct *p, int policy,
5395 const struct sched_param *param)
5396 {
5397 return __sched_setscheduler(p, policy, param, true);
5398 }
5399 EXPORT_SYMBOL_GPL(sched_setscheduler);
5400
5401 /**
5402 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5403 * @p: the task in question.
5404 * @policy: new policy.
5405 * @param: structure containing the new RT priority.
5406 *
5407 * Just like sched_setscheduler, only don't bother checking if the
5408 * current context has permission. For example, this is needed in
5409 * stop_machine(): we create temporary high priority worker threads,
5410 * but our caller might not have that capability.
5411 */
5412 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
5413 const struct sched_param *param)
5414 {
5415 return __sched_setscheduler(p, policy, param, false);
5416 }
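
/*
 * Illustrative sketch, not from the original file: kernel threads that must
 * run ahead of normal tasks are switched to an RT policy with
 * sched_setscheduler_nocheck(), bypassing the permission checks that apply
 * to user requests. The priority value and wrapper are hypothetical.
 */
static void __maybe_unused my_make_thread_rt(struct task_struct *tsk)
{
	struct sched_param param = { .sched_priority = 1 };

	/* SCHED_FIFO priority 1: above all SCHED_NORMAL tasks */
	sched_setscheduler_nocheck(tsk, SCHED_FIFO, &param);
}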
5417
5418 static int
5419 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
5420 {
5421 struct sched_param lparam;
5422 struct task_struct *p;
5423 int retval;
5424
5425 if (!param || pid < 0)
5426 return -EINVAL;
5427 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5428 return -EFAULT;
5429
5430 rcu_read_lock();
5431 retval = -ESRCH;
5432 p = find_process_by_pid(pid);
5433 if (p != NULL)
5434 retval = sched_setscheduler(p, policy, &lparam);
5435 rcu_read_unlock();
5436
5437 return retval;
5438 }
5439
5440 /**
5441 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5442 * @pid: the pid in question.
5443 * @policy: new policy.
5444 * @param: structure containing the new RT priority.
5445 */
5446 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5447 struct sched_param __user *, param)
5448 {
5449 /* negative values for policy are not valid */
5450 if (policy < 0)
5451 return -EINVAL;
5452
5453 return do_sched_setscheduler(pid, policy, param);
5454 }
5455
5456 /**
5457 * sys_sched_setparam - set/change the RT priority of a thread
5458 * @pid: the pid in question.
5459 * @param: structure containing the new RT priority.
5460 */
5461 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
5462 {
5463 return do_sched_setscheduler(pid, -1, param);
5464 }
5465
5466 /**
5467 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5468 * @pid: the pid in question.
5469 */
5470 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
5471 {
5472 struct task_struct *p;
5473 int retval;
5474
5475 if (pid < 0)
5476 return -EINVAL;
5477
5478 retval = -ESRCH;
5479 rcu_read_lock();
5480 p = find_process_by_pid(pid);
5481 if (p) {
5482 retval = security_task_getscheduler(p);
5483 if (!retval)
5484 retval = p->policy
5485 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
5486 }
5487 rcu_read_unlock();
5488 return retval;
5489 }
5490
5491 /**
5492 * sys_sched_getparam - get the RT priority of a thread
5493 * @pid: the pid in question.
5494 * @param: structure containing the RT priority.
5495 */
5496 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
5497 {
5498 struct sched_param lp;
5499 struct task_struct *p;
5500 int retval;
5501
5502 if (!param || pid < 0)
5503 return -EINVAL;
5504
5505 rcu_read_lock();
5506 p = find_process_by_pid(pid);
5507 retval = -ESRCH;
5508 if (!p)
5509 goto out_unlock;
5510
5511 retval = security_task_getscheduler(p);
5512 if (retval)
5513 goto out_unlock;
5514
5515 lp.sched_priority = p->rt_priority;
5516 rcu_read_unlock();
5517
5518 /*
5519 * This one might sleep, we cannot do it with a spinlock held ...
5520 */
5521 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5522
5523 return retval;
5524
5525 out_unlock:
5526 rcu_read_unlock();
5527 return retval;
5528 }
5529
5530 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
5531 {
5532 cpumask_var_t cpus_allowed, new_mask;
5533 struct task_struct *p;
5534 int retval;
5535
5536 get_online_cpus();
5537 rcu_read_lock();
5538
5539 p = find_process_by_pid(pid);
5540 if (!p) {
5541 rcu_read_unlock();
5542 put_online_cpus();
5543 return -ESRCH;
5544 }
5545
5546 /* Prevent p going away */
5547 get_task_struct(p);
5548 rcu_read_unlock();
5549
5550 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5551 retval = -ENOMEM;
5552 goto out_put_task;
5553 }
5554 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5555 retval = -ENOMEM;
5556 goto out_free_cpus_allowed;
5557 }
5558 retval = -EPERM;
5559 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
5560 goto out_unlock;
5561
5562 retval = security_task_setscheduler(p);
5563 if (retval)
5564 goto out_unlock;
5565
5566 cpuset_cpus_allowed(p, cpus_allowed);
5567 cpumask_and(new_mask, in_mask, cpus_allowed);
5568 again:
5569 retval = set_cpus_allowed_ptr(p, new_mask);
5570
5571 if (!retval) {
5572 cpuset_cpus_allowed(p, cpus_allowed);
5573 if (!cpumask_subset(new_mask, cpus_allowed)) {
5574 /*
5575 * We must have raced with a concurrent cpuset
5576 * update. Just reset the cpus_allowed to the
5577 * cpuset's cpus_allowed
5578 */
5579 cpumask_copy(new_mask, cpus_allowed);
5580 goto again;
5581 }
5582 }
5583 out_unlock:
5584 free_cpumask_var(new_mask);
5585 out_free_cpus_allowed:
5586 free_cpumask_var(cpus_allowed);
5587 out_put_task:
5588 put_task_struct(p);
5589 put_online_cpus();
5590 return retval;
5591 }
5592
5593 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
5594 struct cpumask *new_mask)
5595 {
5596 if (len < cpumask_size())
5597 cpumask_clear(new_mask);
5598 else if (len > cpumask_size())
5599 len = cpumask_size();
5600
5601 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5602 }
5603
5604 /**
5605 * sys_sched_setaffinity - set the cpu affinity of a process
5606 * @pid: pid of the process
5607 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5608 * @user_mask_ptr: user-space pointer to the new cpu mask
5609 */
5610 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5611 unsigned long __user *, user_mask_ptr)
5612 {
5613 cpumask_var_t new_mask;
5614 int retval;
5615
5616 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5617 return -ENOMEM;
5618
5619 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5620 if (retval == 0)
5621 retval = sched_setaffinity(pid, new_mask);
5622 free_cpumask_var(new_mask);
5623 return retval;
5624 }
5625
5626 long sched_getaffinity(pid_t pid, struct cpumask *mask)
5627 {
5628 struct task_struct *p;
5629 unsigned long flags;
5630 int retval;
5631
5632 get_online_cpus();
5633 rcu_read_lock();
5634
5635 retval = -ESRCH;
5636 p = find_process_by_pid(pid);
5637 if (!p)
5638 goto out_unlock;
5639
5640 retval = security_task_getscheduler(p);
5641 if (retval)
5642 goto out_unlock;
5643
5644 raw_spin_lock_irqsave(&p->pi_lock, flags);
5645 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
5646 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5647
5648 out_unlock:
5649 rcu_read_unlock();
5650 put_online_cpus();
5651
5652 return retval;
5653 }
5654
5655 /**
5656 * sys_sched_getaffinity - get the cpu affinity of a process
5657 * @pid: pid of the process
5658 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5659 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5660 */
5661 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5662 unsigned long __user *, user_mask_ptr)
5663 {
5664 int ret;
5665 cpumask_var_t mask;
5666
5667 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
5668 return -EINVAL;
5669 if (len & (sizeof(unsigned long)-1))
5670 return -EINVAL;
5671
5672 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5673 return -ENOMEM;
5674
5675 ret = sched_getaffinity(pid, mask);
5676 if (ret == 0) {
5677 size_t retlen = min_t(size_t, len, cpumask_size());
5678
5679 if (copy_to_user(user_mask_ptr, mask, retlen))
5680 ret = -EFAULT;
5681 else
5682 ret = retlen;
5683 }
5684 free_cpumask_var(mask);
5685
5686 return ret;
5687 }
5688
5689 /**
5690 * sys_sched_yield - yield the current processor to other threads.
5691 *
5692 * This function yields the current CPU to other tasks. If there are no
5693 * other threads running on this CPU then this function will return.
5694 */
5695 SYSCALL_DEFINE0(sched_yield)
5696 {
5697 struct rq *rq = this_rq_lock();
5698
5699 schedstat_inc(rq, yld_count);
5700 current->sched_class->yield_task(rq);
5701
5702 /*
5703 * Since we are going to call schedule() anyway, there's
5704 * no need to preempt or enable interrupts:
5705 */
5706 __release(rq->lock);
5707 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
5708 do_raw_spin_unlock(&rq->lock);
5709 preempt_enable_no_resched();
5710
5711 schedule();
5712
5713 return 0;
5714 }
5715
5716 static inline int should_resched(void)
5717 {
5718 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5719 }
5720
5721 static void __cond_resched(void)
5722 {
5723 add_preempt_count(PREEMPT_ACTIVE);
5724 __schedule();
5725 sub_preempt_count(PREEMPT_ACTIVE);
5726 }
5727
5728 int __sched _cond_resched(void)
5729 {
5730 if (should_resched()) {
5731 __cond_resched();
5732 return 1;
5733 }
5734 return 0;
5735 }
5736 EXPORT_SYMBOL(_cond_resched);
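
/*
 * Illustrative sketch, not from the original file: _cond_resched() is reached
 * through the cond_resched() macro, which long-running loops call to offer a
 * voluntary preemption point. The loop below is hypothetical.
 */
static void __maybe_unused my_process_many_items(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		/* ... handle item i ... */
		cond_resched();		/* reschedule here if needed */
	}
}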
5737
5738 /*
5739 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
5740 * call schedule, and on return reacquire the lock.
5741 *
5742 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
5743 * operations here to prevent schedule() from being called twice (once via
5744 * spin_unlock(), once by hand).
5745 */
5746 int __cond_resched_lock(spinlock_t *lock)
5747 {
5748 int resched = should_resched();
5749 int ret = 0;
5750
5751 lockdep_assert_held(lock);
5752
5753 if (spin_needbreak(lock) || resched) {
5754 spin_unlock(lock);
5755 if (resched)
5756 __cond_resched();
5757 else
5758 cpu_relax();
5759 ret = 1;
5760 spin_lock(lock);
5761 }
5762 return ret;
5763 }
5764 EXPORT_SYMBOL(__cond_resched_lock);
5765
5766 int __sched __cond_resched_softirq(void)
5767 {
5768 BUG_ON(!in_softirq());
5769
5770 if (should_resched()) {
5771 local_bh_enable();
5772 __cond_resched();
5773 local_bh_disable();
5774 return 1;
5775 }
5776 return 0;
5777 }
5778 EXPORT_SYMBOL(__cond_resched_softirq);
5779
5780 /**
5781 * yield - yield the current processor to other threads.
5782 *
5783 * This is a shortcut for kernel-space yielding - it marks the
5784 * thread runnable and calls sys_sched_yield().
5785 */
5786 void __sched yield(void)
5787 {
5788 set_current_state(TASK_RUNNING);
5789 sys_sched_yield();
5790 }
5791 EXPORT_SYMBOL(yield);
5792
5793 /**
5794 * yield_to - yield the current processor to another thread in
5795 * your thread group, or accelerate that thread toward the
5796 * processor it's on.
5797 * @p: target task
5798 * @preempt: whether task preemption is allowed or not
5799 *
5800 * It's the caller's job to ensure that the target task struct
5801 * can't go away on us before we can do any checks.
5802 *
5803 * Returns true if we indeed boosted the target task.
5804 */
5805 bool __sched yield_to(struct task_struct *p, bool preempt)
5806 {
5807 struct task_struct *curr = current;
5808 struct rq *rq, *p_rq;
5809 unsigned long flags;
5810 bool yielded = 0;
5811
5812 local_irq_save(flags);
5813 rq = this_rq();
5814
5815 again:
5816 p_rq = task_rq(p);
5817 double_rq_lock(rq, p_rq);
5818 while (task_rq(p) != p_rq) {
5819 double_rq_unlock(rq, p_rq);
5820 goto again;
5821 }
5822
5823 if (!curr->sched_class->yield_to_task)
5824 goto out;
5825
5826 if (curr->sched_class != p->sched_class)
5827 goto out;
5828
5829 if (task_running(p_rq, p) || p->state)
5830 goto out;
5831
5832 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
5833 if (yielded) {
5834 schedstat_inc(rq, yld_count);
5835 /*
5836 * Make p's CPU reschedule; pick_next_entity takes care of
5837 * fairness.
5838 */
5839 if (preempt && rq != p_rq)
5840 resched_task(p_rq->curr);
5841 }
5842
5843 out:
5844 double_rq_unlock(rq, p_rq);
5845 local_irq_restore(flags);
5846
5847 if (yielded)
5848 schedule();
5849
5850 return yielded;
5851 }
5852 EXPORT_SYMBOL_GPL(yield_to);
5853
5854 /*
5855 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
5856 * that process accounting knows that this is a task in IO wait state.
5857 */
5858 void __sched io_schedule(void)
5859 {
5860 struct rq *rq = raw_rq();
5861
5862 delayacct_blkio_start();
5863 atomic_inc(&rq->nr_iowait);
5864 blk_flush_plug(current);
5865 current->in_iowait = 1;
5866 schedule();
5867 current->in_iowait = 0;
5868 atomic_dec(&rq->nr_iowait);
5869 delayacct_blkio_end();
5870 }
5871 EXPORT_SYMBOL(io_schedule);
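
/*
 * Illustrative sketch, not from the original file: callers waiting on block
 * I/O set their sleep state and then use io_schedule() instead of schedule()
 * so that the time is accounted as iowait. The wait loop and flag are
 * hypothetical.
 */
static void __maybe_unused my_wait_for_io(int *my_io_done)
{
	while (!*my_io_done) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!*my_io_done)
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}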
5872
5873 long __sched io_schedule_timeout(long timeout)
5874 {
5875 struct rq *rq = raw_rq();
5876 long ret;
5877
5878 delayacct_blkio_start();
5879 atomic_inc(&rq->nr_iowait);
5880 blk_flush_plug(current);
5881 current->in_iowait = 1;
5882 ret = schedule_timeout(timeout);
5883 current->in_iowait = 0;
5884 atomic_dec(&rq->nr_iowait);
5885 delayacct_blkio_end();
5886 return ret;
5887 }
5888
5889 /**
5890 * sys_sched_get_priority_max - return maximum RT priority.
5891 * @policy: scheduling class.
5892 *
5893 * this syscall returns the maximum rt_priority that can be used
5894 * by a given scheduling class.
5895 */
5896 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
5897 {
5898 int ret = -EINVAL;
5899
5900 switch (policy) {
5901 case SCHED_FIFO:
5902 case SCHED_RR:
5903 ret = MAX_USER_RT_PRIO-1;
5904 break;
5905 case SCHED_NORMAL:
5906 case SCHED_BATCH:
5907 case SCHED_IDLE:
5908 ret = 0;
5909 break;
5910 }
5911 return ret;
5912 }
5913
5914 /**
5915 * sys_sched_get_priority_min - return minimum RT priority.
5916 * @policy: scheduling class.
5917 *
5918 * this syscall returns the minimum rt_priority that can be used
5919 * by a given scheduling class.
5920 */
5921 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5922 {
5923 int ret = -EINVAL;
5924
5925 switch (policy) {
5926 case SCHED_FIFO:
5927 case SCHED_RR:
5928 ret = 1;
5929 break;
5930 case SCHED_NORMAL:
5931 case SCHED_BATCH:
5932 case SCHED_IDLE:
5933 ret = 0;
5934 }
5935 return ret;
5936 }
5937
5938 /**
5939 * sys_sched_rr_get_interval - return the default timeslice of a process.
5940 * @pid: pid of the process.
5941 * @interval: userspace pointer to the timeslice value.
5942 *
5943 * this syscall writes the default timeslice value of a given process
5944 * into the user-space timespec buffer. A value of '0' means infinity.
5945 */
5946 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5947 struct timespec __user *, interval)
5948 {
5949 struct task_struct *p;
5950 unsigned int time_slice;
5951 unsigned long flags;
5952 struct rq *rq;
5953 int retval;
5954 struct timespec t;
5955
5956 if (pid < 0)
5957 return -EINVAL;
5958
5959 retval = -ESRCH;
5960 rcu_read_lock();
5961 p = find_process_by_pid(pid);
5962 if (!p)
5963 goto out_unlock;
5964
5965 retval = security_task_getscheduler(p);
5966 if (retval)
5967 goto out_unlock;
5968
5969 rq = task_rq_lock(p, &flags);
5970 time_slice = p->sched_class->get_rr_interval(rq, p);
5971 task_rq_unlock(rq, p, &flags);
5972
5973 rcu_read_unlock();
5974 jiffies_to_timespec(time_slice, &t);
5975 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5976 return retval;
5977
5978 out_unlock:
5979 rcu_read_unlock();
5980 return retval;
5981 }
5982
5983 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
5984
5985 void sched_show_task(struct task_struct *p)
5986 {
5987 unsigned long free = 0;
5988 unsigned state;
5989
5990 state = p->state ? __ffs(p->state) + 1 : 0;
5991 printk(KERN_INFO "%-15.15s %c", p->comm,
5992 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5993 #if BITS_PER_LONG == 32
5994 if (state == TASK_RUNNING)
5995 printk(KERN_CONT " running ");
5996 else
5997 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5998 #else
5999 if (state == TASK_RUNNING)
6000 printk(KERN_CONT " running task ");
6001 else
6002 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
6003 #endif
6004 #ifdef CONFIG_DEBUG_STACK_USAGE
6005 free = stack_not_used(p);
6006 #endif
6007 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
6008 task_pid_nr(p), task_pid_nr(p->real_parent),
6009 (unsigned long)task_thread_info(p)->flags);
6010
6011 show_stack(p, NULL);
6012 }
6013
6014 void show_state_filter(unsigned long state_filter)
6015 {
6016 struct task_struct *g, *p;
6017
6018 #if BITS_PER_LONG == 32
6019 printk(KERN_INFO
6020 " task PC stack pid father\n");
6021 #else
6022 printk(KERN_INFO
6023 " task PC stack pid father\n");
6024 #endif
6025 rcu_read_lock();
6026 do_each_thread(g, p) {
6027 /*
6028 * reset the NMI-timeout, listing all tasks on a slow
6029 * console might take a lot of time:
6030 */
6031 touch_nmi_watchdog();
6032 if (!state_filter || (p->state & state_filter))
6033 sched_show_task(p);
6034 } while_each_thread(g, p);
6035
6036 touch_all_softlockup_watchdogs();
6037
6038 #ifdef CONFIG_SCHED_DEBUG
6039 sysrq_sched_debug_show();
6040 #endif
6041 rcu_read_unlock();
6042 /*
6043 * Only show locks if all tasks are dumped:
6044 */
6045 if (!state_filter)
6046 debug_show_all_locks();
6047 }
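/*
 * Illustrative callers (not part of this file): show_state() expands to
 * show_state_filter(0) and dumps every task (as sysrq-t does), while
 * sysrq-w uses show_state_filter(TASK_UNINTERRUPTIBLE) to list only the
 * blocked tasks.
 */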
6048
6049 void __cpuinit init_idle_bootup_task(struct task_struct *idle)
6050 {
6051 idle->sched_class = &idle_sched_class;
6052 }
6053
6054 /**
6055 * init_idle - set up an idle thread for a given CPU
6056 * @idle: task in question
6057 * @cpu: cpu the idle task belongs to
6058 *
6059 * NOTE: this function does not set the idle thread's NEED_RESCHED
6060 * flag, to make booting more robust.
6061 */
6062 void __cpuinit init_idle(struct task_struct *idle, int cpu)
6063 {
6064 struct rq *rq = cpu_rq(cpu);
6065 unsigned long flags;
6066
6067 raw_spin_lock_irqsave(&rq->lock, flags);
6068
6069 __sched_fork(idle);
6070 idle->state = TASK_RUNNING;
6071 idle->se.exec_start = sched_clock();
6072
6073 do_set_cpus_allowed(idle, cpumask_of(cpu));
6074 /*
6075 * We have a chicken-and-egg problem here: even though we are
6076 * holding rq->lock, the task's cpu isn't set to this cpu yet, so
6077 * the lockdep check in task_group() will fail.
6078 *
6079 * This is similar to the sched_fork() case. Alternatively we could
6080 * use task_rq_lock() here and obtain the other rq->lock.
6081 *
6082 * Silence PROVE_RCU
6083 */
6084 rcu_read_lock();
6085 __set_task_cpu(idle, cpu);
6086 rcu_read_unlock();
6087
6088 rq->curr = rq->idle = idle;
6089 #if defined(CONFIG_SMP)
6090 idle->on_cpu = 1;
6091 #endif
6092 raw_spin_unlock_irqrestore(&rq->lock, flags);
6093
6094 /* Set the preempt count _outside_ the spinlocks! */
6095 task_thread_info(idle)->preempt_count = 0;
6096
6097 /*
6098 * The idle tasks have their own, simple scheduling class:
6099 */
6100 idle->sched_class = &idle_sched_class;
6101 ftrace_graph_init_idle_task(idle, cpu);
6102 }
6103
6104 /*
6105 * Increase the granularity value when there are more CPUs,
6106 * because with more CPUs the 'effective latency' as visible
6107 * to users decreases. But the relationship is not linear,
6108 * so pick a second-best guess by going with the log2 of the
6109 * number of CPUs.
6110 *
6111 * This idea comes from the SD scheduler of Con Kolivas:
6112 */
6113 static int get_update_sysctl_factor(void)
6114 {
6115 unsigned int cpus = min_t(int, num_online_cpus(), 8);
6116 unsigned int factor;
6117
6118 switch (sysctl_sched_tunable_scaling) {
6119 case SCHED_TUNABLESCALING_NONE:
6120 factor = 1;
6121 break;
6122 case SCHED_TUNABLESCALING_LINEAR:
6123 factor = cpus;
6124 break;
6125 case SCHED_TUNABLESCALING_LOG:
6126 default:
6127 factor = 1 + ilog2(cpus);
6128 break;
6129 }
6130
6131 return factor;
6132 }
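/*
 * Worked example (illustrative): with 8 or more online CPUs and the default
 * SCHED_TUNABLESCALING_LOG policy, factor = 1 + ilog2(8) = 4, so a
 * normalized sched_latency of 6ms becomes 24ms after update_sysctl().
 */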
6133
6134 static void update_sysctl(void)
6135 {
6136 unsigned int factor = get_update_sysctl_factor();
6137
6138 #define SET_SYSCTL(name) \
6139 (sysctl_##name = (factor) * normalized_sysctl_##name)
6140 SET_SYSCTL(sched_min_granularity);
6141 SET_SYSCTL(sched_latency);
6142 SET_SYSCTL(sched_wakeup_granularity);
6143 #undef SET_SYSCTL
6144 }
6145
6146 static inline void sched_init_granularity(void)
6147 {
6148 update_sysctl();
6149 }
6150
6151 #ifdef CONFIG_SMP
6152 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
6153 {
6154 if (p->sched_class && p->sched_class->set_cpus_allowed)
6155 p->sched_class->set_cpus_allowed(p, new_mask);
6156
6157 cpumask_copy(&p->cpus_allowed, new_mask);
6158 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
6159 }
6160
6161 /*
6162 * This is how migration works:
6163 *
6164 * 1) we invoke migration_cpu_stop() on the target CPU using
6165 * stop_one_cpu().
6166 * 2) stopper starts to run (implicitly forcing the migrated thread
6167 * off the CPU)
6168 * 3) it checks whether the migrated task is still in the wrong runqueue.
6169 * 4) if it's in the wrong runqueue then the migration thread removes
6170 * it and puts it into the right queue.
6171 * 5) stopper completes and stop_one_cpu() returns and the migration
6172 * is done.
6173 */
6174
6175 /*
6176 * Change a given task's CPU affinity. Migrate the thread to a
6177 * proper CPU and schedule it away if the CPU it's executing on
6178 * is removed from the allowed bitmask.
6179 *
6180 * NOTE: the caller must have a valid reference to the task, the
6181 * task must not exit() & deallocate itself prematurely. The
6182 * call is not atomic; no spinlocks may be held.
6183 */
6184 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
6185 {
6186 unsigned long flags;
6187 struct rq *rq;
6188 unsigned int dest_cpu;
6189 int ret = 0;
6190
6191 rq = task_rq_lock(p, &flags);
6192
6193 if (cpumask_equal(&p->cpus_allowed, new_mask))
6194 goto out;
6195
6196 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
6197 ret = -EINVAL;
6198 goto out;
6199 }
6200
6201 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
6202 ret = -EINVAL;
6203 goto out;
6204 }
6205
6206 do_set_cpus_allowed(p, new_mask);
6207
6208 /* Can the task run on the task's current CPU? If so, we're done */
6209 if (cpumask_test_cpu(task_cpu(p), new_mask))
6210 goto out;
6211
6212 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
6213 if (p->on_rq) {
6214 struct migration_arg arg = { p, dest_cpu };
6215 /* Need help from migration thread: drop lock and wait. */
6216 task_rq_unlock(rq, p, &flags);
6217 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
6218 tlb_migrate_finish(p->mm);
6219 return 0;
6220 }
6221 out:
6222 task_rq_unlock(rq, p, &flags);
6223
6224 return ret;
6225 }
6226 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
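/*
 * Illustrative in-kernel usage (a sketch, not taken from this file),
 * e.g. pinning a kernel thread onto a single CPU:
 *
 *	if (set_cpus_allowed_ptr(tsk, cpumask_of(cpu)) < 0)
 *		pr_warn("could not pin task to CPU %d\n", cpu);
 */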
6227
6228 /*
6229 * Move a (non-current) task off this cpu, onto the dest cpu. We do this
6230 * because it either can no longer run here (set_cpus_allowed() moved it
6231 * away from this CPU, or the CPU is going down), or because we're
6232 * attempting to rebalance the task on exec (sched_exec).
6233 *
6234 * So we race with normal scheduler movements, but that's OK, as long
6235 * as the task is no longer on this CPU.
6236 *
6237 * Returns non-zero if task was successfully migrated.
6238 */
6239 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
6240 {
6241 struct rq *rq_dest, *rq_src;
6242 int ret = 0;
6243
6244 if (unlikely(!cpu_active(dest_cpu)))
6245 return ret;
6246
6247 rq_src = cpu_rq(src_cpu);
6248 rq_dest = cpu_rq(dest_cpu);
6249
6250 raw_spin_lock(&p->pi_lock);
6251 double_rq_lock(rq_src, rq_dest);
6252 /* Already moved. */
6253 if (task_cpu(p) != src_cpu)
6254 goto done;
6255 /* Affinity changed (again). */
6256 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
6257 goto fail;
6258
6259 /*
6260 * If we're not on a rq, the next wake-up will ensure we're
6261 * placed properly.
6262 */
6263 if (p->on_rq) {
6264 deactivate_task(rq_src, p, 0);
6265 set_task_cpu(p, dest_cpu);
6266 activate_task(rq_dest, p, 0);
6267 check_preempt_curr(rq_dest, p, 0);
6268 }
6269 done:
6270 ret = 1;
6271 fail:
6272 double_rq_unlock(rq_src, rq_dest);
6273 raw_spin_unlock(&p->pi_lock);
6274 return ret;
6275 }
6276
6277 /*
6278 * migration_cpu_stop - this will be executed by a high-prio stopper thread
6279 * and performs thread migration by bumping the thread off its CPU and
6280 * then 'pushing' it onto another runqueue.
6281 */
6282 static int migration_cpu_stop(void *data)
6283 {
6284 struct migration_arg *arg = data;
6285
6286 /*
6287 * The original target cpu might have gone down and we might
6288 * be on another cpu but it doesn't matter.
6289 */
6290 local_irq_disable();
6291 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
6292 local_irq_enable();
6293 return 0;
6294 }
6295
6296 #ifdef CONFIG_HOTPLUG_CPU
6297
6298 /*
6299 * Ensures that the idle task is using init_mm right before its cpu goes
6300 * offline.
6301 */
6302 void idle_task_exit(void)
6303 {
6304 struct mm_struct *mm = current->active_mm;
6305
6306 BUG_ON(cpu_online(smp_processor_id()));
6307
6308 if (mm != &init_mm)
6309 switch_mm(mm, &init_mm, current);
6310 mmdrop(mm);
6311 }
6312
6313 /*
6314 * While a dead CPU has no uninterruptible tasks queued at this point,
6315 * it might still have a nonzero ->nr_uninterruptible counter, because
6316 * for performance reasons the counter is not strictly tracking tasks to
6317 * their home CPUs. So we just add the counter to another CPU's counter,
6318 * to keep the global sum constant after CPU-down:
6319 */
6320 static void migrate_nr_uninterruptible(struct rq *rq_src)
6321 {
6322 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
6323
6324 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6325 rq_src->nr_uninterruptible = 0;
6326 }
6327
6328 /*
6329 * remove the tasks which were accounted by rq from calc_load_tasks.
6330 */
6331 static void calc_global_load_remove(struct rq *rq)
6332 {
6333 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
6334 rq->calc_load_active = 0;
6335 }
6336
6337 #ifdef CONFIG_CFS_BANDWIDTH
6338 static void unthrottle_offline_cfs_rqs(struct rq *rq)
6339 {
6340 struct cfs_rq *cfs_rq;
6341
6342 for_each_leaf_cfs_rq(rq, cfs_rq) {
6343 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
6344
6345 if (!cfs_rq->runtime_enabled)
6346 continue;
6347
6348 /*
6349 * clock_task is not advancing so we just need to make sure
6350 * there's some valid quota amount
6351 */
6352 cfs_rq->runtime_remaining = cfs_b->quota;
6353 if (cfs_rq_throttled(cfs_rq))
6354 unthrottle_cfs_rq(cfs_rq);
6355 }
6356 }
6357 #else
6358 static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
6359 #endif
6360
6361 /*
6362 * Migrate all tasks from the rq; sleeping tasks will be migrated by
6363 * try_to_wake_up()->select_task_rq().
6364 *
6365 * Called with rq->lock held. Even though we're in stop_machine() and
6366 * no concurrency is possible, we hold the required locks anyway for
6367 * the benefit of lock validation.
6368 */
6369 static void migrate_tasks(unsigned int dead_cpu)
6370 {
6371 struct rq *rq = cpu_rq(dead_cpu);
6372 struct task_struct *next, *stop = rq->stop;
6373 int dest_cpu;
6374
6375 /*
6376 * Fudge the rq selection such that the below task selection loop
6377 * doesn't get stuck on the currently eligible stop task.
6378 *
6379 * We're currently inside stop_machine(): the rq is either stuck
6380 * in the stop_machine_cpu_stop() loop or executing this code;
6381 * either way we should never end up calling schedule() until
6382 * we're done here.
6383 */
6384 rq->stop = NULL;
6385
6386 /* Ensure any throttled groups are reachable by pick_next_task */
6387 unthrottle_offline_cfs_rqs(rq);
6388
6389 for ( ; ; ) {
6390 /*
6391 * There's this thread running, bail when that's the only
6392 * remaining thread.
6393 */
6394 if (rq->nr_running == 1)
6395 break;
6396
6397 next = pick_next_task(rq);
6398 BUG_ON(!next);
6399 next->sched_class->put_prev_task(rq, next);
6400
6401 /* Find suitable destination for @next, with force if needed. */
6402 dest_cpu = select_fallback_rq(dead_cpu, next);
6403 raw_spin_unlock(&rq->lock);
6404
6405 __migrate_task(next, dead_cpu, dest_cpu);
6406
6407 raw_spin_lock(&rq->lock);
6408 }
6409
6410 rq->stop = stop;
6411 }
6412
6413 #endif /* CONFIG_HOTPLUG_CPU */
6414
6415 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6416
6417 static struct ctl_table sd_ctl_dir[] = {
6418 {
6419 .procname = "sched_domain",
6420 .mode = 0555,
6421 },
6422 {}
6423 };
6424
6425 static struct ctl_table sd_ctl_root[] = {
6426 {
6427 .procname = "kernel",
6428 .mode = 0555,
6429 .child = sd_ctl_dir,
6430 },
6431 {}
6432 };
6433
6434 static struct ctl_table *sd_alloc_ctl_entry(int n)
6435 {
6436 struct ctl_table *entry =
6437 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
6438
6439 return entry;
6440 }
6441
6442 static void sd_free_ctl_entry(struct ctl_table **tablep)
6443 {
6444 struct ctl_table *entry;
6445
6446 /*
6447 * In the intermediate directories, both the child directory and
6448 * procname are dynamically allocated and could fail but the mode
6449 * will always be set. In the lowest directory the names are
6450 * static strings and all have proc handlers.
6451 */
6452 for (entry = *tablep; entry->mode; entry++) {
6453 if (entry->child)
6454 sd_free_ctl_entry(&entry->child);
6455 if (entry->proc_handler == NULL)
6456 kfree(entry->procname);
6457 }
6458
6459 kfree(*tablep);
6460 *tablep = NULL;
6461 }
6462
6463 static void
6464 set_table_entry(struct ctl_table *entry,
6465 const char *procname, void *data, int maxlen,
6466 mode_t mode, proc_handler *proc_handler)
6467 {
6468 entry->procname = procname;
6469 entry->data = data;
6470 entry->maxlen = maxlen;
6471 entry->mode = mode;
6472 entry->proc_handler = proc_handler;
6473 }
6474
6475 static struct ctl_table *
6476 sd_alloc_ctl_domain_table(struct sched_domain *sd)
6477 {
6478 struct ctl_table *table = sd_alloc_ctl_entry(13);
6479
6480 if (table == NULL)
6481 return NULL;
6482
6483 set_table_entry(&table[0], "min_interval", &sd->min_interval,
6484 sizeof(long), 0644, proc_doulongvec_minmax);
6485 set_table_entry(&table[1], "max_interval", &sd->max_interval,
6486 sizeof(long), 0644, proc_doulongvec_minmax);
6487 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
6488 sizeof(int), 0644, proc_dointvec_minmax);
6489 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
6490 sizeof(int), 0644, proc_dointvec_minmax);
6491 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
6492 sizeof(int), 0644, proc_dointvec_minmax);
6493 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
6494 sizeof(int), 0644, proc_dointvec_minmax);
6495 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
6496 sizeof(int), 0644, proc_dointvec_minmax);
6497 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
6498 sizeof(int), 0644, proc_dointvec_minmax);
6499 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
6500 sizeof(int), 0644, proc_dointvec_minmax);
6501 set_table_entry(&table[9], "cache_nice_tries",
6502 &sd->cache_nice_tries,
6503 sizeof(int), 0644, proc_dointvec_minmax);
6504 set_table_entry(&table[10], "flags", &sd->flags,
6505 sizeof(int), 0644, proc_dointvec_minmax);
6506 set_table_entry(&table[11], "name", sd->name,
6507 CORENAME_MAX_SIZE, 0444, proc_dostring);
6508 /* &table[12] is terminator */
6509
6510 return table;
6511 }
6512
6513 static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
6514 {
6515 struct ctl_table *entry, *table;
6516 struct sched_domain *sd;
6517 int domain_num = 0, i;
6518 char buf[32];
6519
6520 for_each_domain(cpu, sd)
6521 domain_num++;
6522 entry = table = sd_alloc_ctl_entry(domain_num + 1);
6523 if (table == NULL)
6524 return NULL;
6525
6526 i = 0;
6527 for_each_domain(cpu, sd) {
6528 snprintf(buf, 32, "domain%d", i);
6529 entry->procname = kstrdup(buf, GFP_KERNEL);
6530 entry->mode = 0555;
6531 entry->child = sd_alloc_ctl_domain_table(sd);
6532 entry++;
6533 i++;
6534 }
6535 return table;
6536 }
6537
6538 static struct ctl_table_header *sd_sysctl_header;
6539 static void register_sched_domain_sysctl(void)
6540 {
6541 int i, cpu_num = num_possible_cpus();
6542 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6543 char buf[32];
6544
6545 WARN_ON(sd_ctl_dir[0].child);
6546 sd_ctl_dir[0].child = entry;
6547
6548 if (entry == NULL)
6549 return;
6550
6551 for_each_possible_cpu(i) {
6552 snprintf(buf, 32, "cpu%d", i);
6553 entry->procname = kstrdup(buf, GFP_KERNEL);
6554 entry->mode = 0555;
6555 entry->child = sd_alloc_ctl_cpu_table(i);
6556 entry++;
6557 }
6558
6559 WARN_ON(sd_sysctl_header);
6560 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6561 }
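/*
 * The resulting tree (illustrative): one cpuN directory per possible CPU
 * and one domainM directory per domain level, e.g.
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
 */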
6562
6563 /* may be called multiple times per register */
6564 static void unregister_sched_domain_sysctl(void)
6565 {
6566 if (sd_sysctl_header)
6567 unregister_sysctl_table(sd_sysctl_header);
6568 sd_sysctl_header = NULL;
6569 if (sd_ctl_dir[0].child)
6570 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6571 }
6572 #else
6573 static void register_sched_domain_sysctl(void)
6574 {
6575 }
6576 static void unregister_sched_domain_sysctl(void)
6577 {
6578 }
6579 #endif
6580
6581 static void set_rq_online(struct rq *rq)
6582 {
6583 if (!rq->online) {
6584 const struct sched_class *class;
6585
6586 cpumask_set_cpu(rq->cpu, rq->rd->online);
6587 rq->online = 1;
6588
6589 for_each_class(class) {
6590 if (class->rq_online)
6591 class->rq_online(rq);
6592 }
6593 }
6594 }
6595
6596 static void set_rq_offline(struct rq *rq)
6597 {
6598 if (rq->online) {
6599 const struct sched_class *class;
6600
6601 for_each_class(class) {
6602 if (class->rq_offline)
6603 class->rq_offline(rq);
6604 }
6605
6606 cpumask_clear_cpu(rq->cpu, rq->rd->online);
6607 rq->online = 0;
6608 }
6609 }
6610
6611 /*
6612 * migration_call - callback that gets triggered when a CPU is added.
6613 * Here we can start up the necessary migration thread for the new CPU.
6614 */
6615 static int __cpuinit
6616 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6617 {
6618 int cpu = (long)hcpu;
6619 unsigned long flags;
6620 struct rq *rq = cpu_rq(cpu);
6621
6622 switch (action & ~CPU_TASKS_FROZEN) {
6623
6624 case CPU_UP_PREPARE:
6625 rq->calc_load_update = calc_load_update;
6626 break;
6627
6628 case CPU_ONLINE:
6629 /* Update our root-domain */
6630 raw_spin_lock_irqsave(&rq->lock, flags);
6631 if (rq->rd) {
6632 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6633
6634 set_rq_online(rq);
6635 }
6636 raw_spin_unlock_irqrestore(&rq->lock, flags);
6637 break;
6638
6639 #ifdef CONFIG_HOTPLUG_CPU
6640 case CPU_DYING:
6641 sched_ttwu_pending();
6642 /* Update our root-domain */
6643 raw_spin_lock_irqsave(&rq->lock, flags);
6644 if (rq->rd) {
6645 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6646 set_rq_offline(rq);
6647 }
6648 migrate_tasks(cpu);
6649 BUG_ON(rq->nr_running != 1); /* the migration thread */
6650 raw_spin_unlock_irqrestore(&rq->lock, flags);
6651
6652 migrate_nr_uninterruptible(rq);
6653 calc_global_load_remove(rq);
6654 break;
6655 #endif
6656 }
6657
6658 update_max_interval();
6659
6660 return NOTIFY_OK;
6661 }
6662
6663 /*
6664 * Register at high priority so that task migration (migrate_all_tasks)
6665 * happens before everything else. This has to be lower priority than
6666 * the notifier in the perf_event subsystem, though.
6667 */
6668 static struct notifier_block __cpuinitdata migration_notifier = {
6669 .notifier_call = migration_call,
6670 .priority = CPU_PRI_MIGRATION,
6671 };
6672
6673 static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6674 unsigned long action, void *hcpu)
6675 {
6676 switch (action & ~CPU_TASKS_FROZEN) {
6677 case CPU_ONLINE:
6678 case CPU_DOWN_FAILED:
6679 set_cpu_active((long)hcpu, true);
6680 return NOTIFY_OK;
6681 default:
6682 return NOTIFY_DONE;
6683 }
6684 }
6685
6686 static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6687 unsigned long action, void *hcpu)
6688 {
6689 switch (action & ~CPU_TASKS_FROZEN) {
6690 case CPU_DOWN_PREPARE:
6691 set_cpu_active((long)hcpu, false);
6692 return NOTIFY_OK;
6693 default:
6694 return NOTIFY_DONE;
6695 }
6696 }
6697
6698 static int __init migration_init(void)
6699 {
6700 void *cpu = (void *)(long)smp_processor_id();
6701 int err;
6702
6703 /* Initialize migration for the boot CPU */
6704 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6705 BUG_ON(err == NOTIFY_BAD);
6706 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6707 register_cpu_notifier(&migration_notifier);
6708
6709 /* Register cpu active notifiers */
6710 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6711 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6712
6713 return 0;
6714 }
6715 early_initcall(migration_init);
6716 #endif
6717
6718 #ifdef CONFIG_SMP
6719
6720 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
6721
6722 #ifdef CONFIG_SCHED_DEBUG
6723
6724 static __read_mostly int sched_domain_debug_enabled;
6725
6726 static int __init sched_domain_debug_setup(char *str)
6727 {
6728 sched_domain_debug_enabled = 1;
6729
6730 return 0;
6731 }
6732 early_param("sched_debug", sched_domain_debug_setup);
6733
6734 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6735 struct cpumask *groupmask)
6736 {
6737 struct sched_group *group = sd->groups;
6738 char str[256];
6739
6740 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
6741 cpumask_clear(groupmask);
6742
6743 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6744
6745 if (!(sd->flags & SD_LOAD_BALANCE)) {
6746 printk("does not load-balance\n");
6747 if (sd->parent)
6748 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6749 " has parent\n");
6750 return -1;
6751 }
6752
6753 printk(KERN_CONT "span %s level %s\n", str, sd->name);
6754
6755 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
6756 printk(KERN_ERR "ERROR: domain->span does not contain "
6757 "CPU%d\n", cpu);
6758 }
6759 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
6760 printk(KERN_ERR "ERROR: domain->groups does not contain"
6761 " CPU%d\n", cpu);
6762 }
6763
6764 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6765 do {
6766 if (!group) {
6767 printk("\n");
6768 printk(KERN_ERR "ERROR: group is NULL\n");
6769 break;
6770 }
6771
6772 if (!group->sgp->power) {
6773 printk(KERN_CONT "\n");
6774 printk(KERN_ERR "ERROR: domain->cpu_power not "
6775 "set\n");
6776 break;
6777 }
6778
6779 if (!cpumask_weight(sched_group_cpus(group))) {
6780 printk(KERN_CONT "\n");
6781 printk(KERN_ERR "ERROR: empty group\n");
6782 break;
6783 }
6784
6785 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
6786 printk(KERN_CONT "\n");
6787 printk(KERN_ERR "ERROR: repeated CPUs\n");
6788 break;
6789 }
6790
6791 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
6792
6793 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
6794
6795 printk(KERN_CONT " %s", str);
6796 if (group->sgp->power != SCHED_POWER_SCALE) {
6797 printk(KERN_CONT " (cpu_power = %d)",
6798 group->sgp->power);
6799 }
6800
6801 group = group->next;
6802 } while (group != sd->groups);
6803 printk(KERN_CONT "\n");
6804
6805 if (!cpumask_equal(sched_domain_span(sd), groupmask))
6806 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
6807
6808 if (sd->parent &&
6809 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
6810 printk(KERN_ERR "ERROR: parent span is not a superset "
6811 "of domain->span\n");
6812 return 0;
6813 }
6814
6815 static void sched_domain_debug(struct sched_domain *sd, int cpu)
6816 {
6817 int level = 0;
6818
6819 if (!sched_domain_debug_enabled)
6820 return;
6821
6822 if (!sd) {
6823 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6824 return;
6825 }
6826
6827 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6828
6829 for (;;) {
6830 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
6831 break;
6832 level++;
6833 sd = sd->parent;
6834 if (!sd)
6835 break;
6836 }
6837 }
6838 #else /* !CONFIG_SCHED_DEBUG */
6839 # define sched_domain_debug(sd, cpu) do { } while (0)
6840 #endif /* CONFIG_SCHED_DEBUG */
6841
6842 static int sd_degenerate(struct sched_domain *sd)
6843 {
6844 if (cpumask_weight(sched_domain_span(sd)) == 1)
6845 return 1;
6846
6847 /* Following flags need at least 2 groups */
6848 if (sd->flags & (SD_LOAD_BALANCE |
6849 SD_BALANCE_NEWIDLE |
6850 SD_BALANCE_FORK |
6851 SD_BALANCE_EXEC |
6852 SD_SHARE_CPUPOWER |
6853 SD_SHARE_PKG_RESOURCES)) {
6854 if (sd->groups != sd->groups->next)
6855 return 0;
6856 }
6857
6858 /* Following flags don't use groups */
6859 if (sd->flags & (SD_WAKE_AFFINE))
6860 return 0;
6861
6862 return 1;
6863 }
6864
6865 static int
6866 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
6867 {
6868 unsigned long cflags = sd->flags, pflags = parent->flags;
6869
6870 if (sd_degenerate(parent))
6871 return 1;
6872
6873 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
6874 return 0;
6875
6876 /* Flags needing groups don't count if only 1 group in parent */
6877 if (parent->groups == parent->groups->next) {
6878 pflags &= ~(SD_LOAD_BALANCE |
6879 SD_BALANCE_NEWIDLE |
6880 SD_BALANCE_FORK |
6881 SD_BALANCE_EXEC |
6882 SD_SHARE_CPUPOWER |
6883 SD_SHARE_PKG_RESOURCES);
6884 if (nr_node_ids == 1)
6885 pflags &= ~SD_SERIALIZE;
6886 }
6887 if (~cflags & pflags)
6888 return 0;
6889
6890 return 1;
6891 }
6892
6893 static void free_rootdomain(struct rcu_head *rcu)
6894 {
6895 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
6896
6897 cpupri_cleanup(&rd->cpupri);
6898 free_cpumask_var(rd->rto_mask);
6899 free_cpumask_var(rd->online);
6900 free_cpumask_var(rd->span);
6901 kfree(rd);
6902 }
6903
6904 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6905 {
6906 struct root_domain *old_rd = NULL;
6907 unsigned long flags;
6908
6909 raw_spin_lock_irqsave(&rq->lock, flags);
6910
6911 if (rq->rd) {
6912 old_rd = rq->rd;
6913
6914 if (cpumask_test_cpu(rq->cpu, old_rd->online))
6915 set_rq_offline(rq);
6916
6917 cpumask_clear_cpu(rq->cpu, old_rd->span);
6918
6919 /*
6920 * If we don't want to free the old_rd yet then
6921 * set old_rd to NULL to skip the freeing later
6922 * in this function:
6923 */
6924 if (!atomic_dec_and_test(&old_rd->refcount))
6925 old_rd = NULL;
6926 }
6927
6928 atomic_inc(&rd->refcount);
6929 rq->rd = rd;
6930
6931 cpumask_set_cpu(rq->cpu, rd->span);
6932 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
6933 set_rq_online(rq);
6934
6935 raw_spin_unlock_irqrestore(&rq->lock, flags);
6936
6937 if (old_rd)
6938 call_rcu_sched(&old_rd->rcu, free_rootdomain);
6939 }
6940
6941 static int init_rootdomain(struct root_domain *rd)
6942 {
6943 memset(rd, 0, sizeof(*rd));
6944
6945 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
6946 goto out;
6947 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
6948 goto free_span;
6949 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
6950 goto free_online;
6951
6952 if (cpupri_init(&rd->cpupri) != 0)
6953 goto free_rto_mask;
6954 return 0;
6955
6956 free_rto_mask:
6957 free_cpumask_var(rd->rto_mask);
6958 free_online:
6959 free_cpumask_var(rd->online);
6960 free_span:
6961 free_cpumask_var(rd->span);
6962 out:
6963 return -ENOMEM;
6964 }
6965
6966 static void init_defrootdomain(void)
6967 {
6968 init_rootdomain(&def_root_domain);
6969
6970 atomic_set(&def_root_domain.refcount, 1);
6971 }
6972
6973 static struct root_domain *alloc_rootdomain(void)
6974 {
6975 struct root_domain *rd;
6976
6977 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6978 if (!rd)
6979 return NULL;
6980
6981 if (init_rootdomain(rd) != 0) {
6982 kfree(rd);
6983 return NULL;
6984 }
6985
6986 return rd;
6987 }
6988
6989 static void free_sched_groups(struct sched_group *sg, int free_sgp)
6990 {
6991 struct sched_group *tmp, *first;
6992
6993 if (!sg)
6994 return;
6995
6996 first = sg;
6997 do {
6998 tmp = sg->next;
6999
7000 if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
7001 kfree(sg->sgp);
7002
7003 kfree(sg);
7004 sg = tmp;
7005 } while (sg != first);
7006 }
7007
7008 static void free_sched_domain(struct rcu_head *rcu)
7009 {
7010 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
7011
7012 /*
7013 * If it's an overlapping domain it has private groups; iterate and
7014 * nuke them all.
7015 */
7016 if (sd->flags & SD_OVERLAP) {
7017 free_sched_groups(sd->groups, 1);
7018 } else if (atomic_dec_and_test(&sd->groups->ref)) {
7019 kfree(sd->groups->sgp);
7020 kfree(sd->groups);
7021 }
7022 kfree(sd);
7023 }
7024
7025 static void destroy_sched_domain(struct sched_domain *sd, int cpu)
7026 {
7027 call_rcu(&sd->rcu, free_sched_domain);
7028 }
7029
7030 static void destroy_sched_domains(struct sched_domain *sd, int cpu)
7031 {
7032 for (; sd; sd = sd->parent)
7033 destroy_sched_domain(sd, cpu);
7034 }
7035
7036 /*
7037 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
7038 * hold the hotplug lock.
7039 */
7040 static void
7041 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
7042 {
7043 struct rq *rq = cpu_rq(cpu);
7044 struct sched_domain *tmp;
7045
7046 /* Remove the sched domains which do not contribute to scheduling. */
7047 for (tmp = sd; tmp; ) {
7048 struct sched_domain *parent = tmp->parent;
7049 if (!parent)
7050 break;
7051
7052 if (sd_parent_degenerate(tmp, parent)) {
7053 tmp->parent = parent->parent;
7054 if (parent->parent)
7055 parent->parent->child = tmp;
7056 destroy_sched_domain(parent, cpu);
7057 } else
7058 tmp = tmp->parent;
7059 }
7060
7061 if (sd && sd_degenerate(sd)) {
7062 tmp = sd;
7063 sd = sd->parent;
7064 destroy_sched_domain(tmp, cpu);
7065 if (sd)
7066 sd->child = NULL;
7067 }
7068
7069 sched_domain_debug(sd, cpu);
7070
7071 rq_attach_root(rq, rd);
7072 tmp = rq->sd;
7073 rcu_assign_pointer(rq->sd, sd);
7074 destroy_sched_domains(tmp, cpu);
7075 }
7076
7077 /* cpus with isolated domains */
7078 static cpumask_var_t cpu_isolated_map;
7079
7080 /* Setup the mask of cpus configured for isolated domains */
7081 static int __init isolated_cpu_setup(char *str)
7082 {
7083 alloc_bootmem_cpumask_var(&cpu_isolated_map);
7084 cpulist_parse(str, cpu_isolated_map);
7085 return 1;
7086 }
7087
7088 __setup("isolcpus=", isolated_cpu_setup);
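/*
 * Example (boot command line): "isolcpus=2,3" keeps CPUs 2 and 3 out of the
 * general scheduler domains; tasks only run there via explicit affinity.
 */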
7089
7090 #ifdef CONFIG_NUMA
7091
7092 /**
7093 * find_next_best_node - find the next node to include in a sched_domain
7094 * @node: node whose sched_domain we're building
7095 * @used_nodes: nodes already in the sched_domain
7096 *
7097 * Find the next node to include in a given scheduling domain. Simply
7098 * finds the closest node not already in the @used_nodes map.
7099 *
7100 * Should use nodemask_t.
7101 */
7102 static int find_next_best_node(int node, nodemask_t *used_nodes)
7103 {
7104 int i, n, val, min_val, best_node = -1;
7105
7106 min_val = INT_MAX;
7107
7108 for (i = 0; i < nr_node_ids; i++) {
7109 /* Start at @node */
7110 n = (node + i) % nr_node_ids;
7111
7112 if (!nr_cpus_node(n))
7113 continue;
7114
7115 /* Skip already used nodes */
7116 if (node_isset(n, *used_nodes))
7117 continue;
7118
7119 /* Simple min distance search */
7120 val = node_distance(node, n);
7121
7122 if (val < min_val) {
7123 min_val = val;
7124 best_node = n;
7125 }
7126 }
7127
7128 if (best_node != -1)
7129 node_set(best_node, *used_nodes);
7130 return best_node;
7131 }
7132
7133 /**
7134 * sched_domain_node_span - get a cpumask for a node's sched_domain
7135 * @node: node whose cpumask we're constructing
7136 * @span: resulting cpumask
7137 *
7138 * Given a node, construct a good cpumask for its sched_domain to span. It
7139 * should be one that prevents unnecessary balancing, but also spreads tasks
7140 * out optimally.
7141 */
7142 static void sched_domain_node_span(int node, struct cpumask *span)
7143 {
7144 nodemask_t used_nodes;
7145 int i;
7146
7147 cpumask_clear(span);
7148 nodes_clear(used_nodes);
7149
7150 cpumask_or(span, span, cpumask_of_node(node));
7151 node_set(node, used_nodes);
7152
7153 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
7154 int next_node = find_next_best_node(node, &used_nodes);
7155 if (next_node < 0)
7156 break;
7157 cpumask_or(span, span, cpumask_of_node(next_node));
7158 }
7159 }
7160
7161 static const struct cpumask *cpu_node_mask(int cpu)
7162 {
7163 lockdep_assert_held(&sched_domains_mutex);
7164
7165 sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
7166
7167 return sched_domains_tmpmask;
7168 }
7169
7170 static const struct cpumask *cpu_allnodes_mask(int cpu)
7171 {
7172 return cpu_possible_mask;
7173 }
7174 #endif /* CONFIG_NUMA */
7175
7176 static const struct cpumask *cpu_cpu_mask(int cpu)
7177 {
7178 return cpumask_of_node(cpu_to_node(cpu));
7179 }
7180
7181 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
7182
7183 struct sd_data {
7184 struct sched_domain **__percpu sd;
7185 struct sched_group **__percpu sg;
7186 struct sched_group_power **__percpu sgp;
7187 };
7188
7189 struct s_data {
7190 struct sched_domain ** __percpu sd;
7191 struct root_domain *rd;
7192 };
7193
7194 enum s_alloc {
7195 sa_rootdomain,
7196 sa_sd,
7197 sa_sd_storage,
7198 sa_none,
7199 };
7200
7201 struct sched_domain_topology_level;
7202
7203 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
7204 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
7205
7206 #define SDTL_OVERLAP 0x01
7207
7208 struct sched_domain_topology_level {
7209 sched_domain_init_f init;
7210 sched_domain_mask_f mask;
7211 int flags;
7212 struct sd_data data;
7213 };
7214
7215 static int
7216 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
7217 {
7218 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
7219 const struct cpumask *span = sched_domain_span(sd);
7220 struct cpumask *covered = sched_domains_tmpmask;
7221 struct sd_data *sdd = sd->private;
7222 struct sched_domain *child;
7223 int i;
7224
7225 cpumask_clear(covered);
7226
7227 for_each_cpu(i, span) {
7228 struct cpumask *sg_span;
7229
7230 if (cpumask_test_cpu(i, covered))
7231 continue;
7232
7233 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
7234 GFP_KERNEL, cpu_to_node(i));
7235
7236 if (!sg)
7237 goto fail;
7238
7239 sg_span = sched_group_cpus(sg);
7240
7241 child = *per_cpu_ptr(sdd->sd, i);
7242 if (child->child) {
7243 child = child->child;
7244 cpumask_copy(sg_span, sched_domain_span(child));
7245 } else
7246 cpumask_set_cpu(i, sg_span);
7247
7248 cpumask_or(covered, covered, sg_span);
7249
7250 sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
7251 atomic_inc(&sg->sgp->ref);
7252
7253 if (cpumask_test_cpu(cpu, sg_span))
7254 groups = sg;
7255
7256 if (!first)
7257 first = sg;
7258 if (last)
7259 last->next = sg;
7260 last = sg;
7261 last->next = first;
7262 }
7263 sd->groups = groups;
7264
7265 return 0;
7266
7267 fail:
7268 free_sched_groups(first, 0);
7269
7270 return -ENOMEM;
7271 }
7272
7273 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
7274 {
7275 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
7276 struct sched_domain *child = sd->child;
7277
7278 if (child)
7279 cpu = cpumask_first(sched_domain_span(child));
7280
7281 if (sg) {
7282 *sg = *per_cpu_ptr(sdd->sg, cpu);
7283 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
7284 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
7285 }
7286
7287 return cpu;
7288 }
7289
7290 /*
7291 * build_sched_groups will build a circular linked list of the groups
7292 * covered by the given span, set each group's ->cpumask correctly,
7293 * and initialize each group's ->cpu_power to 0.
7294 *
7295 * Assumes the sched_domain tree is fully constructed
7296 */
7297 static int
7298 build_sched_groups(struct sched_domain *sd, int cpu)
7299 {
7300 struct sched_group *first = NULL, *last = NULL;
7301 struct sd_data *sdd = sd->private;
7302 const struct cpumask *span = sched_domain_span(sd);
7303 struct cpumask *covered;
7304 int i;
7305
7306 get_group(cpu, sdd, &sd->groups);
7307 atomic_inc(&sd->groups->ref);
7308
7309 if (cpu != cpumask_first(sched_domain_span(sd)))
7310 return 0;
7311
7312 lockdep_assert_held(&sched_domains_mutex);
7313 covered = sched_domains_tmpmask;
7314
7315 cpumask_clear(covered);
7316
7317 for_each_cpu(i, span) {
7318 struct sched_group *sg;
7319 int group = get_group(i, sdd, &sg);
7320 int j;
7321
7322 if (cpumask_test_cpu(i, covered))
7323 continue;
7324
7325 cpumask_clear(sched_group_cpus(sg));
7326 sg->sgp->power = 0;
7327
7328 for_each_cpu(j, span) {
7329 if (get_group(j, sdd, NULL) != group)
7330 continue;
7331
7332 cpumask_set_cpu(j, covered);
7333 cpumask_set_cpu(j, sched_group_cpus(sg));
7334 }
7335
7336 if (!first)
7337 first = sg;
7338 if (last)
7339 last->next = sg;
7340 last = sg;
7341 }
7342 last->next = first;
7343
7344 return 0;
7345 }
7346
7347 /*
7348 * Initialize sched groups cpu_power.
7349 *
7350 * cpu_power indicates the capacity of a sched group, which is used while
7351 * distributing load between the different sched groups in a sched domain.
7352 * Typically cpu_power will be the same for all groups in a sched domain,
7353 * unless there are asymmetries in the topology. If there are asymmetries,
7354 * the group with more cpu_power will pick up more load than the group
7355 * with less cpu_power.
7356 */
7357 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7358 {
7359 struct sched_group *sg = sd->groups;
7360
7361 WARN_ON(!sd || !sg);
7362
7363 do {
7364 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
7365 sg = sg->next;
7366 } while (sg != sd->groups);
7367
7368 if (cpu != group_first_cpu(sg))
7369 return;
7370
7371 update_group_power(sd, cpu);
7372 }
7373
7374 /*
7375 * Initializers for schedule domains
7376 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7377 */
7378
7379 #ifdef CONFIG_SCHED_DEBUG
7380 # define SD_INIT_NAME(sd, type) sd->name = #type
7381 #else
7382 # define SD_INIT_NAME(sd, type) do { } while (0)
7383 #endif
7384
7385 #define SD_INIT_FUNC(type) \
7386 static noinline struct sched_domain * \
7387 sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
7388 { \
7389 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
7390 *sd = SD_##type##_INIT; \
7391 SD_INIT_NAME(sd, type); \
7392 sd->private = &tl->data; \
7393 return sd; \
7394 }
7395
7396 SD_INIT_FUNC(CPU)
7397 #ifdef CONFIG_NUMA
7398 SD_INIT_FUNC(ALLNODES)
7399 SD_INIT_FUNC(NODE)
7400 #endif
7401 #ifdef CONFIG_SCHED_SMT
7402 SD_INIT_FUNC(SIBLING)
7403 #endif
7404 #ifdef CONFIG_SCHED_MC
7405 SD_INIT_FUNC(MC)
7406 #endif
7407 #ifdef CONFIG_SCHED_BOOK
7408 SD_INIT_FUNC(BOOK)
7409 #endif
7410
7411 static int default_relax_domain_level = -1;
7412 int sched_domain_level_max;
7413
7414 static int __init setup_relax_domain_level(char *str)
7415 {
7416 unsigned long val;
7417
7418 val = simple_strtoul(str, NULL, 0);
7419 if (val < sched_domain_level_max)
7420 default_relax_domain_level = val;
7421
7422 return 1;
7423 }
7424 __setup("relax_domain_level=", setup_relax_domain_level);
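/*
 * Example (boot command line, illustrative): "relax_domain_level=1" requests
 * that SD_BALANCE_WAKE/SD_BALANCE_NEWIDLE be cleared on domains whose level
 * exceeds 1; see set_domain_attribute() below.
 */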
7425
7426 static void set_domain_attribute(struct sched_domain *sd,
7427 struct sched_domain_attr *attr)
7428 {
7429 int request;
7430
7431 if (!attr || attr->relax_domain_level < 0) {
7432 if (default_relax_domain_level < 0)
7433 return;
7434 else
7435 request = default_relax_domain_level;
7436 } else
7437 request = attr->relax_domain_level;
7438 if (request < sd->level) {
7439 /* turn off idle balance on this domain */
7440 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
7441 } else {
7442 /* turn on idle balance on this domain */
7443 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
7444 }
7445 }
7446
7447 static void __sdt_free(const struct cpumask *cpu_map);
7448 static int __sdt_alloc(const struct cpumask *cpu_map);
7449
7450 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7451 const struct cpumask *cpu_map)
7452 {
7453 switch (what) {
7454 case sa_rootdomain:
7455 if (!atomic_read(&d->rd->refcount))
7456 free_rootdomain(&d->rd->rcu); /* fall through */
7457 case sa_sd:
7458 free_percpu(d->sd); /* fall through */
7459 case sa_sd_storage:
7460 __sdt_free(cpu_map); /* fall through */
7461 case sa_none:
7462 break;
7463 }
7464 }
7465
7466 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7467 const struct cpumask *cpu_map)
7468 {
7469 memset(d, 0, sizeof(*d));
7470
7471 if (__sdt_alloc(cpu_map))
7472 return sa_sd_storage;
7473 d->sd = alloc_percpu(struct sched_domain *);
7474 if (!d->sd)
7475 return sa_sd_storage;
7476 d->rd = alloc_rootdomain();
7477 if (!d->rd)
7478 return sa_sd;
7479 return sa_rootdomain;
7480 }
7481
7482 /*
7483 * NULL the sd_data elements we've used to build the sched_domain and
7484 * sched_group structure so that the subsequent __free_domain_allocs()
7485 * will not free the data we're using.
7486 */
7487 static void claim_allocations(int cpu, struct sched_domain *sd)
7488 {
7489 struct sd_data *sdd = sd->private;
7490
7491 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
7492 *per_cpu_ptr(sdd->sd, cpu) = NULL;
7493
7494 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
7495 *per_cpu_ptr(sdd->sg, cpu) = NULL;
7496
7497 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
7498 *per_cpu_ptr(sdd->sgp, cpu) = NULL;
7499 }
7500
7501 #ifdef CONFIG_SCHED_SMT
7502 static const struct cpumask *cpu_smt_mask(int cpu)
7503 {
7504 return topology_thread_cpumask(cpu);
7505 }
7506 #endif
7507
7508 /*
7509 * Topology list, bottom-up.
7510 */
7511 static struct sched_domain_topology_level default_topology[] = {
7512 #ifdef CONFIG_SCHED_SMT
7513 { sd_init_SIBLING, cpu_smt_mask, },
7514 #endif
7515 #ifdef CONFIG_SCHED_MC
7516 { sd_init_MC, cpu_coregroup_mask, },
7517 #endif
7518 #ifdef CONFIG_SCHED_BOOK
7519 { sd_init_BOOK, cpu_book_mask, },
7520 #endif
7521 { sd_init_CPU, cpu_cpu_mask, },
7522 #ifdef CONFIG_NUMA
7523 { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
7524 { sd_init_ALLNODES, cpu_allnodes_mask, },
7525 #endif
7526 { NULL, },
7527 };
7528
7529 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
7530
7531 static int __sdt_alloc(const struct cpumask *cpu_map)
7532 {
7533 struct sched_domain_topology_level *tl;
7534 int j;
7535
7536 for (tl = sched_domain_topology; tl->init; tl++) {
7537 struct sd_data *sdd = &tl->data;
7538
7539 sdd->sd = alloc_percpu(struct sched_domain *);
7540 if (!sdd->sd)
7541 return -ENOMEM;
7542
7543 sdd->sg = alloc_percpu(struct sched_group *);
7544 if (!sdd->sg)
7545 return -ENOMEM;
7546
7547 sdd->sgp = alloc_percpu(struct sched_group_power *);
7548 if (!sdd->sgp)
7549 return -ENOMEM;
7550
7551 for_each_cpu(j, cpu_map) {
7552 struct sched_domain *sd;
7553 struct sched_group *sg;
7554 struct sched_group_power *sgp;
7555
7556 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
7557 GFP_KERNEL, cpu_to_node(j));
7558 if (!sd)
7559 return -ENOMEM;
7560
7561 *per_cpu_ptr(sdd->sd, j) = sd;
7562
7563 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
7564 GFP_KERNEL, cpu_to_node(j));
7565 if (!sg)
7566 return -ENOMEM;
7567
7568 *per_cpu_ptr(sdd->sg, j) = sg;
7569
7570 sgp = kzalloc_node(sizeof(struct sched_group_power),
7571 GFP_KERNEL, cpu_to_node(j));
7572 if (!sgp)
7573 return -ENOMEM;
7574
7575 *per_cpu_ptr(sdd->sgp, j) = sgp;
7576 }
7577 }
7578
7579 return 0;
7580 }
7581
7582 static void __sdt_free(const struct cpumask *cpu_map)
7583 {
7584 struct sched_domain_topology_level *tl;
7585 int j;
7586
7587 for (tl = sched_domain_topology; tl->init; tl++) {
7588 struct sd_data *sdd = &tl->data;
7589
7590 for_each_cpu(j, cpu_map) {
7591 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
7592 if (sd && (sd->flags & SD_OVERLAP))
7593 free_sched_groups(sd->groups, 0);
7594 kfree(*per_cpu_ptr(sdd->sd, j));
7595 kfree(*per_cpu_ptr(sdd->sg, j));
7596 kfree(*per_cpu_ptr(sdd->sgp, j));
7597 }
7598 free_percpu(sdd->sd);
7599 free_percpu(sdd->sg);
7600 free_percpu(sdd->sgp);
7601 }
7602 }
7603
7604 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
7605 struct s_data *d, const struct cpumask *cpu_map,
7606 struct sched_domain_attr *attr, struct sched_domain *child,
7607 int cpu)
7608 {
7609 struct sched_domain *sd = tl->init(tl, cpu);
7610 if (!sd)
7611 return child;
7612
7613 set_domain_attribute(sd, attr);
7614 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
7615 if (child) {
7616 sd->level = child->level + 1;
7617 sched_domain_level_max = max(sched_domain_level_max, sd->level);
7618 child->parent = sd;
7619 }
7620 sd->child = child;
7621
7622 return sd;
7623 }
7624
7625 /*
7626 * Build sched domains for a given set of cpus and attach the sched domains
7627 * to the individual cpus
7628 */
7629 static int build_sched_domains(const struct cpumask *cpu_map,
7630 struct sched_domain_attr *attr)
7631 {
7632 enum s_alloc alloc_state = sa_none;
7633 struct sched_domain *sd;
7634 struct s_data d;
7635 int i, ret = -ENOMEM;
7636
7637 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7638 if (alloc_state != sa_rootdomain)
7639 goto error;
7640
7641 /* Set up domains for cpus specified by the cpu_map. */
7642 for_each_cpu(i, cpu_map) {
7643 struct sched_domain_topology_level *tl;
7644
7645 sd = NULL;
7646 for (tl = sched_domain_topology; tl->init; tl++) {
7647 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
7648 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
7649 sd->flags |= SD_OVERLAP;
7650 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
7651 break;
7652 }
7653
7654 while (sd->child)
7655 sd = sd->child;
7656
7657 *per_cpu_ptr(d.sd, i) = sd;
7658 }
7659
7660 /* Build the groups for the domains */
7661 for_each_cpu(i, cpu_map) {
7662 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7663 sd->span_weight = cpumask_weight(sched_domain_span(sd));
7664 if (sd->flags & SD_OVERLAP) {
7665 if (build_overlap_sched_groups(sd, i))
7666 goto error;
7667 } else {
7668 if (build_sched_groups(sd, i))
7669 goto error;
7670 }
7671 }
7672 }
7673
7674 /* Calculate CPU power for physical packages and nodes */
7675 for (i = nr_cpumask_bits-1; i >= 0; i--) {
7676 if (!cpumask_test_cpu(i, cpu_map))
7677 continue;
7678
7679 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7680 claim_allocations(i, sd);
7681 init_sched_groups_power(i, sd);
7682 }
7683 }
7684
7685 /* Attach the domains */
7686 rcu_read_lock();
7687 for_each_cpu(i, cpu_map) {
7688 sd = *per_cpu_ptr(d.sd, i);
7689 cpu_attach_domain(sd, d.rd, i);
7690 }
7691 rcu_read_unlock();
7692
7693 ret = 0;
7694 error:
7695 __free_domain_allocs(&d, alloc_state, cpu_map);
7696 return ret;
7697 }
7698
7699 static cpumask_var_t *doms_cur; /* current sched domains */
7700 static int ndoms_cur; /* number of sched domains in 'doms_cur' */
7701 static struct sched_domain_attr *dattr_cur;
7702 /* attributes of custom domains in 'doms_cur' */
7703
7704 /*
7705 * Special case: If a kmalloc of a doms_cur partition (array of
7706 * cpumask) fails, then fallback to a single sched domain,
7707 * as determined by the single cpumask fallback_doms.
7708 */
7709 static cpumask_var_t fallback_doms;
7710
7711 /*
7712 * arch_update_cpu_topology lets virtualized architectures update the
7713 * cpu core maps. It is supposed to return 1 if the topology changed
7714 * or 0 if it stayed the same.
7715 */
7716 int __attribute__((weak)) arch_update_cpu_topology(void)
7717 {
7718 return 0;
7719 }
7720
7721 cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7722 {
7723 int i;
7724 cpumask_var_t *doms;
7725
7726 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7727 if (!doms)
7728 return NULL;
7729 for (i = 0; i < ndoms; i++) {
7730 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7731 free_sched_domains(doms, i);
7732 return NULL;
7733 }
7734 }
7735 return doms;
7736 }
7737
7738 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7739 {
7740 unsigned int i;
7741 for (i = 0; i < ndoms; i++)
7742 free_cpumask_var(doms[i]);
7743 kfree(doms);
7744 }
7745
7746 /*
7747 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
7748 * For now this just excludes isolated cpus, but could be used to
7749 * exclude other special cases in the future.
7750 */
7751 static int init_sched_domains(const struct cpumask *cpu_map)
7752 {
7753 int err;
7754
7755 arch_update_cpu_topology();
7756 ndoms_cur = 1;
7757 doms_cur = alloc_sched_domains(ndoms_cur);
7758 if (!doms_cur)
7759 doms_cur = &fallback_doms;
7760 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
7761 dattr_cur = NULL;
7762 err = build_sched_domains(doms_cur[0], NULL);
7763 register_sched_domain_sysctl();
7764
7765 return err;
7766 }
7767
7768 /*
7769 * Detach sched domains from a group of cpus specified in cpu_map.
7770 * These cpus will now be attached to the NULL domain.
7771 */
7772 static void detach_destroy_domains(const struct cpumask *cpu_map)
7773 {
7774 int i;
7775
7776 rcu_read_lock();
7777 for_each_cpu(i, cpu_map)
7778 cpu_attach_domain(NULL, &def_root_domain, i);
7779 rcu_read_unlock();
7780 }
7781
7782 /* handle null as "default" */
7783 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7784 struct sched_domain_attr *new, int idx_new)
7785 {
7786 struct sched_domain_attr tmp;
7787
7788 /* fast path */
7789 if (!new && !cur)
7790 return 1;
7791
7792 tmp = SD_ATTR_INIT;
7793 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7794 new ? (new + idx_new) : &tmp,
7795 sizeof(struct sched_domain_attr));
7796 }
7797
7798 /*
7799 * Partition sched domains as specified by the 'ndoms_new'
7800 * cpumasks in the array doms_new[] of cpumasks. This compares
7801 * doms_new[] to the current sched domain partitioning, doms_cur[].
7802 * It destroys each deleted domain and builds each new domain.
7803 *
7804 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
7805 * The masks must not intersect (overlap); we set up one sched
7806 * domain for each mask. CPUs not in any of the cpumasks will
7807 * not be load balanced. If the same cpumask appears both in the
7808 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7809 * it as it is.
7810 *
7811 * The passed-in 'doms_new' should be allocated using
7812 * alloc_sched_domains. This routine takes ownership of it and will
7813 * free_sched_domains() it when done with it. If the caller failed the
7814 * alloc call, it can pass in doms_new == NULL && ndoms_new == 1;
7815 * partition_sched_domains() will then fall back to the single partition
7816 * 'fallback_doms', which also forces the domains to be rebuilt.
7817 *
7818 * If doms_new == NULL it will be replaced with cpu_online_mask.
7819 * ndoms_new == 0 is a special case for destroying existing domains,
7820 * and it will not create the default domain.
7821 *
7822 * Call with hotplug lock held
7823 */
7824 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
7825 struct sched_domain_attr *dattr_new)
7826 {
7827 int i, j, n;
7828 int new_topology;
7829
7830 mutex_lock(&sched_domains_mutex);
7831
7832 /* always unregister in case we don't destroy any domains */
7833 unregister_sched_domain_sysctl();
7834
7835 /* Let architecture update cpu core mappings. */
7836 new_topology = arch_update_cpu_topology();
7837
7838 n = doms_new ? ndoms_new : 0;
7839
7840 /* Destroy deleted domains */
7841 for (i = 0; i < ndoms_cur; i++) {
7842 for (j = 0; j < n && !new_topology; j++) {
7843 if (cpumask_equal(doms_cur[i], doms_new[j])
7844 && dattrs_equal(dattr_cur, i, dattr_new, j))
7845 goto match1;
7846 }
7847 /* no match - a current sched domain not in new doms_new[] */
7848 detach_destroy_domains(doms_cur[i]);
7849 match1:
7850 ;
7851 }
7852
7853 if (doms_new == NULL) {
7854 ndoms_cur = 0;
7855 doms_new = &fallback_doms;
7856 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
7857 WARN_ON_ONCE(dattr_new);
7858 }
7859
7860 /* Build new domains */
7861 for (i = 0; i < ndoms_new; i++) {
7862 for (j = 0; j < ndoms_cur && !new_topology; j++) {
7863 if (cpumask_equal(doms_new[i], doms_cur[j])
7864 && dattrs_equal(dattr_new, i, dattr_cur, j))
7865 goto match2;
7866 }
7867 /* no match - add a new doms_new */
7868 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
7869 match2:
7870 ;
7871 }
7872
7873 /* Remember the new sched domains */
7874 if (doms_cur != &fallback_doms)
7875 free_sched_domains(doms_cur, ndoms_cur);
7876 kfree(dattr_cur); /* kfree(NULL) is safe */
7877 doms_cur = doms_new;
7878 dattr_cur = dattr_new;
7879 ndoms_cur = ndoms_new;
7880
7881 register_sched_domain_sysctl();
7882
7883 mutex_unlock(&sched_domains_mutex);
7884 }
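/*
 * Illustrative caller pattern (a sketch; the cpuset code does something
 * similar when rebuilding domains):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpu_active_mask);
 *		partition_sched_domains(1, doms, NULL);
 *	}
 *
 * Ownership of 'doms' passes to partition_sched_domains().
 */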
7885
7886 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
7887 static void reinit_sched_domains(void)
7888 {
7889 get_online_cpus();
7890
7891 /* Destroy domains first to force the rebuild */
7892 partition_sched_domains(0, NULL, NULL);
7893
7894 rebuild_sched_domains();
7895 put_online_cpus();
7896 }
7897
7898 static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7899 {
7900 unsigned int level = 0;
7901
7902 if (sscanf(buf, "%u", &level) != 1)
7903 return -EINVAL;
7904
7905 /*
7906 * level is unsigned, so we never need to check for
7907 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
7908 * What happens on a 0 or 1 byte write?
7909 * Do we need to check count as well?
7910 */
7911
7912 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
7913 return -EINVAL;
7914
7915 if (smt)
7916 sched_smt_power_savings = level;
7917 else
7918 sched_mc_power_savings = level;
7919
7920 reinit_sched_domains();
7921
7922 return count;
7923 }
7924
7925 #ifdef CONFIG_SCHED_MC
7926 static ssize_t sched_mc_power_savings_show(struct device *dev,
7927 struct device_attribute *attr,
7928 char *buf)
7929 {
7930 return sprintf(buf, "%u\n", sched_mc_power_savings);
7931 }
7932 static ssize_t sched_mc_power_savings_store(struct device *dev,
7933 struct device_attribute *attr,
7934 const char *buf, size_t count)
7935 {
7936 return sched_power_savings_store(buf, count, 0);
7937 }
7938 static DEVICE_ATTR(sched_mc_power_savings, 0644,
7939 sched_mc_power_savings_show,
7940 sched_mc_power_savings_store);
7941 #endif
7942
7943 #ifdef CONFIG_SCHED_SMT
7944 static ssize_t sched_smt_power_savings_show(struct device *dev,
7945 struct device_attribute *attr,
7946 char *buf)
7947 {
7948 return sprintf(buf, "%u\n", sched_smt_power_savings);
7949 }
7950 static ssize_t sched_smt_power_savings_store(struct device *dev,
7951 struct device_attribute *attr,
7952 const char *buf, size_t count)
7953 {
7954 return sched_power_savings_store(buf, count, 1);
7955 }
7956 static DEVICE_ATTR(sched_smt_power_savings, 0644,
7957 sched_smt_power_savings_show,
7958 sched_smt_power_savings_store);
7959 #endif
7960
7961 int __init sched_create_sysfs_power_savings_entries(struct device *dev)
7962 {
7963 int err = 0;
7964
7965 #ifdef CONFIG_SCHED_SMT
7966 if (smt_capable())
7967 err = device_create_file(dev, &dev_attr_sched_smt_power_savings);
7968 #endif
7969 #ifdef CONFIG_SCHED_MC
7970 if (!err && mc_capable())
7971 err = device_create_file(dev, &dev_attr_sched_mc_power_savings);
7972 #endif
7973 return err;
7974 }
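/*
 * With the cpu subsystem's root device passed in, these show up as
 * (illustrative paths):
 *
 *	/sys/devices/system/cpu/sched_mc_power_savings
 *	/sys/devices/system/cpu/sched_smt_power_savings
 *
 * e.g. writing "1" to sched_mc_power_savings enables multi-core
 * power-aware balancing via sched_power_savings_store().
 */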
7975 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
7976
7977 /*
7978 * Update cpusets according to cpu_active mask. If cpusets are
7979 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7980 * around partition_sched_domains().
7981 */
7982 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7983 void *hcpu)
7984 {
7985 switch (action & ~CPU_TASKS_FROZEN) {
7986 case CPU_ONLINE:
7987 case CPU_DOWN_FAILED:
7988 cpuset_update_active_cpus();
7989 return NOTIFY_OK;
7990 default:
7991 return NOTIFY_DONE;
7992 }
7993 }
7994
7995 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7996 void *hcpu)
7997 {
7998 switch (action & ~CPU_TASKS_FROZEN) {
7999 case CPU_DOWN_PREPARE:
8000 cpuset_update_active_cpus();
8001 return NOTIFY_OK;
8002 default:
8003 return NOTIFY_DONE;
8004 }
8005 }
8006
8007 static int update_runtime(struct notifier_block *nfb,
8008 unsigned long action, void *hcpu)
8009 {
8010 int cpu = (int)(long)hcpu;
8011
8012 switch (action) {
8013 case CPU_DOWN_PREPARE:
8014 case CPU_DOWN_PREPARE_FROZEN:
8015 disable_runtime(cpu_rq(cpu));
8016 return NOTIFY_OK;
8017
8018 case CPU_DOWN_FAILED:
8019 case CPU_DOWN_FAILED_FROZEN:
8020 case CPU_ONLINE:
8021 case CPU_ONLINE_FROZEN:
8022 enable_runtime(cpu_rq(cpu));
8023 return NOTIFY_OK;
8024
8025 default:
8026 return NOTIFY_DONE;
8027 }
8028 }
8029
8030 void __init sched_init_smp(void)
8031 {
8032 cpumask_var_t non_isolated_cpus;
8033
8034 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
8035 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
8036
8037 get_online_cpus();
8038 mutex_lock(&sched_domains_mutex);
8039 init_sched_domains(cpu_active_mask);
8040 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
8041 if (cpumask_empty(non_isolated_cpus))
8042 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
8043 mutex_unlock(&sched_domains_mutex);
8044 put_online_cpus();
8045
8046 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
8047 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
8048
8049 /* RT runtime code needs to handle some hotplug events */
8050 hotcpu_notifier(update_runtime, 0);
8051
8052 init_hrtick();
8053
8054 /* Move init over to a non-isolated CPU */
8055 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
8056 BUG();
8057 sched_init_granularity();
8058 free_cpumask_var(non_isolated_cpus);
8059
8060 init_sched_rt_class();
8061 }
8062 #else
8063 void __init sched_init_smp(void)
8064 {
8065 sched_init_granularity();
8066 }
8067 #endif /* CONFIG_SMP */
8068
8069 const_debug unsigned int sysctl_timer_migration = 1;
8070
8071 int in_sched_functions(unsigned long addr)
8072 {
8073 return in_lock_functions(addr) ||
8074 (addr >= (unsigned long)__sched_text_start
8075 && addr < (unsigned long)__sched_text_end);
8076 }
8077
8078 static void init_cfs_rq(struct cfs_rq *cfs_rq)
8079 {
8080 cfs_rq->tasks_timeline = RB_ROOT;
8081 INIT_LIST_HEAD(&cfs_rq->tasks);
8082 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8083 #ifndef CONFIG_64BIT
8084 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
8085 #endif
8086 }
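/*
 * A note on the initial min_vruntime above (my reading of the constant,
 * not something spelled out here): (u64)(-(1LL << 20)) is
 *
 *	2^64 - 2^20 = 0xFFFFFFFFFFF00000
 *
 * i.e. the clock starts roughly one millisecond's worth of vruntime below
 * the unsigned wrap point, so the wraparound handling in the vruntime
 * comparisons is exercised shortly after boot rather than only after a
 * very long uptime.
 */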
8087
8088 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8089 {
8090 struct rt_prio_array *array;
8091 int i;
8092
8093 array = &rt_rq->active;
8094 for (i = 0; i < MAX_RT_PRIO; i++) {
8095 INIT_LIST_HEAD(array->queue + i);
8096 __clear_bit(i, array->bitmap);
8097 }
8098 /* delimiter for bitsearch: */
8099 __set_bit(MAX_RT_PRIO, array->bitmap);
8100
8101 #if defined CONFIG_SMP
8102 rt_rq->highest_prio.curr = MAX_RT_PRIO;
8103 rt_rq->highest_prio.next = MAX_RT_PRIO;
8104 rt_rq->rt_nr_migratory = 0;
8105 rt_rq->overloaded = 0;
8106 plist_head_init(&rt_rq->pushable_tasks);
8107 #endif
8108
8109 rt_rq->rt_time = 0;
8110 rt_rq->rt_throttled = 0;
8111 rt_rq->rt_runtime = 0;
8112 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
8113 }
8114
8115 #ifdef CONFIG_FAIR_GROUP_SCHED
8116 static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8117 struct sched_entity *se, int cpu,
8118 struct sched_entity *parent)
8119 {
8120 struct rq *rq = cpu_rq(cpu);
8121
8122 cfs_rq->tg = tg;
8123 cfs_rq->rq = rq;
8124 #ifdef CONFIG_SMP
8125 /* allow initial update_cfs_load() to truncate */
8126 cfs_rq->load_stamp = 1;
8127 #endif
8128 init_cfs_rq_runtime(cfs_rq);
8129
8130 tg->cfs_rq[cpu] = cfs_rq;
8131 tg->se[cpu] = se;
8132
8133 /* se could be NULL for root_task_group */
8134 if (!se)
8135 return;
8136
8137 if (!parent)
8138 se->cfs_rq = &rq->cfs;
8139 else
8140 se->cfs_rq = parent->my_q;
8141
8142 se->my_q = cfs_rq;
8143 update_load_set(&se->load, 0);
8144 se->parent = parent;
8145 }
8146 #endif
8147
8148 #ifdef CONFIG_RT_GROUP_SCHED
8149 static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
8150 struct sched_rt_entity *rt_se, int cpu,
8151 struct sched_rt_entity *parent)
8152 {
8153 struct rq *rq = cpu_rq(cpu);
8154
8155 rt_rq->highest_prio.curr = MAX_RT_PRIO;
8156 rt_rq->rt_nr_boosted = 0;
8157 rt_rq->rq = rq;
8158 rt_rq->tg = tg;
8159
8160 tg->rt_rq[cpu] = rt_rq;
8161 tg->rt_se[cpu] = rt_se;
8162
8163 if (!rt_se)
8164 return;
8165
8166 if (!parent)
8167 rt_se->rt_rq = &rq->rt;
8168 else
8169 rt_se->rt_rq = parent->my_q;
8170
8171 rt_se->my_q = rt_rq;
8172 rt_se->parent = parent;
8173 INIT_LIST_HEAD(&rt_se->run_list);
8174 }
8175 #endif
8176
8177 void __init sched_init(void)
8178 {
8179 int i, j;
8180 unsigned long alloc_size = 0, ptr;
8181
8182 #ifdef CONFIG_FAIR_GROUP_SCHED
8183 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8184 #endif
8185 #ifdef CONFIG_RT_GROUP_SCHED
8186 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8187 #endif
8188 #ifdef CONFIG_CPUMASK_OFFSTACK
8189 alloc_size += num_possible_cpus() * cpumask_size();
8190 #endif
8191 if (alloc_size) {
8192 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
8193
8194 #ifdef CONFIG_FAIR_GROUP_SCHED
8195 root_task_group.se = (struct sched_entity **)ptr;
8196 ptr += nr_cpu_ids * sizeof(void **);
8197
8198 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8199 ptr += nr_cpu_ids * sizeof(void **);
8200
8201 #endif /* CONFIG_FAIR_GROUP_SCHED */
8202 #ifdef CONFIG_RT_GROUP_SCHED
8203 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8204 ptr += nr_cpu_ids * sizeof(void **);
8205
8206 root_task_group.rt_rq = (struct rt_rq **)ptr;
8207 ptr += nr_cpu_ids * sizeof(void **);
8208
8209 #endif /* CONFIG_RT_GROUP_SCHED */
8210 #ifdef CONFIG_CPUMASK_OFFSTACK
8211 for_each_possible_cpu(i) {
8212 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
8213 ptr += cpumask_size();
8214 }
8215 #endif /* CONFIG_CPUMASK_OFFSTACK */
8216 }
8217
8218 #ifdef CONFIG_SMP
8219 init_defrootdomain();
8220 #endif
8221
8222 init_rt_bandwidth(&def_rt_bandwidth,
8223 global_rt_period(), global_rt_runtime());
8224
8225 #ifdef CONFIG_RT_GROUP_SCHED
8226 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8227 global_rt_period(), global_rt_runtime());
8228 #endif /* CONFIG_RT_GROUP_SCHED */
8229
8230 #ifdef CONFIG_CGROUP_SCHED
8231 list_add(&root_task_group.list, &task_groups);
8232 INIT_LIST_HEAD(&root_task_group.children);
8233 autogroup_init(&init_task);
8234 #endif /* CONFIG_CGROUP_SCHED */
8235
8236 for_each_possible_cpu(i) {
8237 struct rq *rq;
8238
8239 rq = cpu_rq(i);
8240 raw_spin_lock_init(&rq->lock);
8241 rq->nr_running = 0;
8242 rq->calc_load_active = 0;
8243 rq->calc_load_update = jiffies + LOAD_FREQ;
8244 init_cfs_rq(&rq->cfs);
8245 init_rt_rq(&rq->rt, rq);
8246 #ifdef CONFIG_FAIR_GROUP_SCHED
8247 root_task_group.shares = root_task_group_load;
8248 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8249 /*
8250 * How much cpu bandwidth does root_task_group get?
8251 *
8252 * In case of task-groups formed through the cgroup filesystem, it
8253 * gets 100% of the cpu resources in the system. This overall
8254 * system cpu resource is divided among the tasks of
8255 * root_task_group and its child task-groups in a fair manner,
8256 * based on each entity's (task or task-group's) weight
8257 * (se->load.weight).
8258 *
8259 * In other words, if root_task_group has 10 tasks of weight
8260 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8261 * then A0's share of the cpu resource is:
8262 *
8263 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8264 *
8265 * We achieve this by letting root_task_group's tasks sit
8266 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8267 */
8268 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
8269 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8270 #endif /* CONFIG_FAIR_GROUP_SCHED */
8271
8272 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
8273 #ifdef CONFIG_RT_GROUP_SCHED
8274 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
8275 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8276 #endif
8277
8278 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
8279 rq->cpu_load[j] = 0;
8280
8281 rq->last_load_update_tick = jiffies;
8282
8283 #ifdef CONFIG_SMP
8284 rq->sd = NULL;
8285 rq->rd = NULL;
8286 rq->cpu_power = SCHED_POWER_SCALE;
8287 rq->post_schedule = 0;
8288 rq->active_balance = 0;
8289 rq->next_balance = jiffies;
8290 rq->push_cpu = 0;
8291 rq->cpu = i;
8292 rq->online = 0;
8293 rq->idle_stamp = 0;
8294 rq->avg_idle = 2*sysctl_sched_migration_cost;
8295 rq_attach_root(rq, &def_root_domain);
8296 #ifdef CONFIG_NO_HZ
8297 rq->nohz_balance_kick = 0;
8298 #endif
8299 #endif
8300 init_rq_hrtick(rq);
8301 atomic_set(&rq->nr_iowait, 0);
8302 }
8303
8304 set_load_weight(&init_task);
8305
8306 #ifdef CONFIG_PREEMPT_NOTIFIERS
8307 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8308 #endif
8309
8310 #ifdef CONFIG_SMP
8311 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8312 #endif
8313
8314 #ifdef CONFIG_RT_MUTEXES
8315 plist_head_init(&init_task.pi_waiters);
8316 #endif
8317
8318 /*
8319 * The boot idle thread does lazy MMU switching as well:
8320 */
8321 atomic_inc(&init_mm.mm_count);
8322 enter_lazy_tlb(&init_mm, current);
8323
8324 /*
8325 * Make us the idle thread. Technically, schedule() should not be
8326 * called from this thread; however, somewhere below it might be, and
8327 * because we are the idle thread, we just pick up running again
8328 * when this runqueue becomes "idle".
8329 */
8330 init_idle(current, smp_processor_id());
8331
8332 calc_load_update = jiffies + LOAD_FREQ;
8333
8334 /*
8335 * During early bootup we pretend to be a normal task:
8336 */
8337 current->sched_class = &fair_sched_class;
8338
8339 #ifdef CONFIG_SMP
8340 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
8341 #ifdef CONFIG_NO_HZ
8342 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8343 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
8344 atomic_set(&nohz.load_balancer, nr_cpu_ids);
8345 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
8346 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
8347 #endif
8348 /* May be allocated at isolcpus cmdline parse time */
8349 if (cpu_isolated_map == NULL)
8350 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
8351 #endif /* SMP */
8352
8353 scheduler_running = 1;
8354 }
8355
8356 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8357 static inline int preempt_count_equals(int preempt_offset)
8358 {
8359 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
8360
8361 return (nested == preempt_offset);
8362 }
8363
8364 void __might_sleep(const char *file, int line, int preempt_offset)
8365 {
8366 static unsigned long prev_jiffy; /* ratelimiting */
8367
8368 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
8369 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8370 system_state != SYSTEM_RUNNING || oops_in_progress)
8371 return;
8372 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8373 return;
8374 prev_jiffy = jiffies;
8375
8376 printk(KERN_ERR
8377 "BUG: sleeping function called from invalid context at %s:%d\n",
8378 file, line);
8379 printk(KERN_ERR
8380 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8381 in_atomic(), irqs_disabled(),
8382 current->pid, current->comm);
8383
8384 debug_show_held_locks(current);
8385 if (irqs_disabled())
8386 print_irqtrace_events(current);
8387 dump_stack();
8388 }
8389 EXPORT_SYMBOL(__might_sleep);
8390 #endif
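/*
 * A minimal sketch of the class of bug __might_sleep() reports, assuming a
 * CONFIG_PREEMPT_COUNT kernel where spin_lock() raises preempt_count so
 * that preempt_count_equals(0) above fails:
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	spin_lock(&example_lock);
 *	kmalloc(64, GFP_KERNEL);	// may sleep while atomic:
 *					// "BUG: sleeping function called
 *					//  from invalid context"
 *	spin_unlock(&example_lock);
 *
 * Using GFP_ATOMIC, or dropping the lock around the allocation, avoids
 * the warning.
 */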
8391
8392 #ifdef CONFIG_MAGIC_SYSRQ
8393 static void normalize_task(struct rq *rq, struct task_struct *p)
8394 {
8395 const struct sched_class *prev_class = p->sched_class;
8396 int old_prio = p->prio;
8397 int on_rq;
8398
8399 on_rq = p->on_rq;
8400 if (on_rq)
8401 deactivate_task(rq, p, 0);
8402 __setscheduler(rq, p, SCHED_NORMAL, 0);
8403 if (on_rq) {
8404 activate_task(rq, p, 0);
8405 resched_task(rq->curr);
8406 }
8407
8408 check_class_changed(rq, p, prev_class, old_prio);
8409 }
8410
8411 void normalize_rt_tasks(void)
8412 {
8413 struct task_struct *g, *p;
8414 unsigned long flags;
8415 struct rq *rq;
8416
8417 read_lock_irqsave(&tasklist_lock, flags);
8418 do_each_thread(g, p) {
8419 /*
8420 * Only normalize user tasks:
8421 */
8422 if (!p->mm)
8423 continue;
8424
8425 p->se.exec_start = 0;
8426 #ifdef CONFIG_SCHEDSTATS
8427 p->se.statistics.wait_start = 0;
8428 p->se.statistics.sleep_start = 0;
8429 p->se.statistics.block_start = 0;
8430 #endif
8431
8432 if (!rt_task(p)) {
8433 /*
8434 * Renice negative nice level userspace
8435 * tasks back to 0:
8436 */
8437 if (TASK_NICE(p) < 0 && p->mm)
8438 set_user_nice(p, 0);
8439 continue;
8440 }
8441
8442 raw_spin_lock(&p->pi_lock);
8443 rq = __task_rq_lock(p);
8444
8445 normalize_task(rq, p);
8446
8447 __task_rq_unlock(rq);
8448 raw_spin_unlock(&p->pi_lock);
8449 } while_each_thread(g, p);
8450
8451 read_unlock_irqrestore(&tasklist_lock, flags);
8452 }
8453
8454 #endif /* CONFIG_MAGIC_SYSRQ */
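/*
 * Usage note: normalize_rt_tasks() is the handler behind the SysRq
 * "nice-all-RT-tasks" key ('n'). An illustrative userspace fragment,
 * assuming /proc/sysrq-trigger is available, to demote every RT task back
 * to SCHED_NORMAL when a runaway RT task has taken over a CPU:
 *
 *	int fd = open("/proc/sysrq-trigger", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "n", 1);
 *		close(fd);
 *	}
 */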
8455
8456 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
8457 /*
8458 * These functions are only useful for the IA64 MCA handling, or kdb.
8459 *
8460 * They can only be called when the whole system has been
8461 * stopped - every CPU needs to be quiescent, and no scheduling
8462 * activity can take place. Using them for anything else would
8463 * be a serious bug, and as a result, they aren't even visible
8464 * under any other configuration.
8465 */
8466
8467 /**
8468 * curr_task - return the current task for a given cpu.
8469 * @cpu: the processor in question.
8470 *
8471 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8472 */
8473 struct task_struct *curr_task(int cpu)
8474 {
8475 return cpu_curr(cpu);
8476 }
8477
8478 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8479
8480 #ifdef CONFIG_IA64
8481 /**
8482 * set_curr_task - set the current task for a given cpu.
8483 * @cpu: the processor in question.
8484 * @p: the task pointer to set.
8485 *
8486 * Description: This function must only be used when non-maskable interrupts
8487 * are serviced on a separate stack. It allows the architecture to switch the
8488 * notion of the current task on a cpu in a non-blocking manner. This function
8489 * must be called with all CPUs synchronized and interrupts disabled; the
8490 * caller must save the original value of the current task (see
8491 * curr_task() above) and restore that value before reenabling interrupts and
8492 * re-starting the system.
8493 *
8494 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8495 */
8496 void set_curr_task(int cpu, struct task_struct *p)
8497 {
8498 cpu_curr(cpu) = p;
8499 }
8500
8501 #endif
8502
8503 #ifdef CONFIG_FAIR_GROUP_SCHED
8504 static void free_fair_sched_group(struct task_group *tg)
8505 {
8506 int i;
8507
8508 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8509
8510 for_each_possible_cpu(i) {
8511 if (tg->cfs_rq)
8512 kfree(tg->cfs_rq[i]);
8513 if (tg->se)
8514 kfree(tg->se[i]);
8515 }
8516
8517 kfree(tg->cfs_rq);
8518 kfree(tg->se);
8519 }
8520
8521 static
8522 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8523 {
8524 struct cfs_rq *cfs_rq;
8525 struct sched_entity *se;
8526 int i;
8527
8528 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8529 if (!tg->cfs_rq)
8530 goto err;
8531 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8532 if (!tg->se)
8533 goto err;
8534
8535 tg->shares = NICE_0_LOAD;
8536
8537 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8538
8539 for_each_possible_cpu(i) {
8540 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8541 GFP_KERNEL, cpu_to_node(i));
8542 if (!cfs_rq)
8543 goto err;
8544
8545 se = kzalloc_node(sizeof(struct sched_entity),
8546 GFP_KERNEL, cpu_to_node(i));
8547 if (!se)
8548 goto err_free_rq;
8549
8550 init_cfs_rq(cfs_rq);
8551 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8552 }
8553
8554 return 1;
8555
8556 err_free_rq:
8557 kfree(cfs_rq);
8558 err:
8559 return 0;
8560 }
8561
8562 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8563 {
8564 struct rq *rq = cpu_rq(cpu);
8565 unsigned long flags;
8566
8567 /*
8568 * Only empty task groups can be destroyed, so we can speculatively
8569 * check on_list without danger of it being re-added.
8570 */
8571 if (!tg->cfs_rq[cpu]->on_list)
8572 return;
8573
8574 raw_spin_lock_irqsave(&rq->lock, flags);
8575 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8576 raw_spin_unlock_irqrestore(&rq->lock, flags);
8577 }
8578 #else /* !CONFIG_FAIR_GROUP_SCHED */
8579 static inline void free_fair_sched_group(struct task_group *tg)
8580 {
8581 }
8582
8583 static inline
8584 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8585 {
8586 return 1;
8587 }
8588
8589 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8590 {
8591 }
8592 #endif /* CONFIG_FAIR_GROUP_SCHED */
8593
8594 #ifdef CONFIG_RT_GROUP_SCHED
8595 static void free_rt_sched_group(struct task_group *tg)
8596 {
8597 int i;
8598
8599 if (tg->rt_se)
8600 destroy_rt_bandwidth(&tg->rt_bandwidth);
8601
8602 for_each_possible_cpu(i) {
8603 if (tg->rt_rq)
8604 kfree(tg->rt_rq[i]);
8605 if (tg->rt_se)
8606 kfree(tg->rt_se[i]);
8607 }
8608
8609 kfree(tg->rt_rq);
8610 kfree(tg->rt_se);
8611 }
8612
8613 static
8614 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
8615 {
8616 struct rt_rq *rt_rq;
8617 struct sched_rt_entity *rt_se;
8618 int i;
8619
8620 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
8621 if (!tg->rt_rq)
8622 goto err;
8623 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
8624 if (!tg->rt_se)
8625 goto err;
8626
8627 init_rt_bandwidth(&tg->rt_bandwidth,
8628 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
8629
8630 for_each_possible_cpu(i) {
8631 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8632 GFP_KERNEL, cpu_to_node(i));
8633 if (!rt_rq)
8634 goto err;
8635
8636 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8637 GFP_KERNEL, cpu_to_node(i));
8638 if (!rt_se)
8639 goto err_free_rq;
8640
8641 init_rt_rq(rt_rq, cpu_rq(i));
8642 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
8643 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
8644 }
8645
8646 return 1;
8647
8648 err_free_rq:
8649 kfree(rt_rq);
8650 err:
8651 return 0;
8652 }
8653 #else /* !CONFIG_RT_GROUP_SCHED */
8654 static inline void free_rt_sched_group(struct task_group *tg)
8655 {
8656 }
8657
8658 static inline
8659 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
8660 {
8661 return 1;
8662 }
8663 #endif /* CONFIG_RT_GROUP_SCHED */
8664
8665 #ifdef CONFIG_CGROUP_SCHED
8666 static void free_sched_group(struct task_group *tg)
8667 {
8668 free_fair_sched_group(tg);
8669 free_rt_sched_group(tg);
8670 autogroup_free(tg);
8671 kfree(tg);
8672 }
8673
8674 /* allocate runqueue etc for a new task group */
8675 struct task_group *sched_create_group(struct task_group *parent)
8676 {
8677 struct task_group *tg;
8678 unsigned long flags;
8679
8680 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8681 if (!tg)
8682 return ERR_PTR(-ENOMEM);
8683
8684 if (!alloc_fair_sched_group(tg, parent))
8685 goto err;
8686
8687 if (!alloc_rt_sched_group(tg, parent))
8688 goto err;
8689
8690 spin_lock_irqsave(&task_group_lock, flags);
8691 list_add_rcu(&tg->list, &task_groups);
8692
8693 WARN_ON(!parent); /* root should already exist */
8694
8695 tg->parent = parent;
8696 INIT_LIST_HEAD(&tg->children);
8697 list_add_rcu(&tg->siblings, &parent->children);
8698 spin_unlock_irqrestore(&task_group_lock, flags);
8699
8700 return tg;
8701
8702 err:
8703 free_sched_group(tg);
8704 return ERR_PTR(-ENOMEM);
8705 }
8706
8707 /* rcu callback to free various structures associated with a task group */
8708 static void free_sched_group_rcu(struct rcu_head *rhp)
8709 {
8710 /* now it should be safe to free those cfs_rqs */
8711 free_sched_group(container_of(rhp, struct task_group, rcu));
8712 }
8713
8714 /* Destroy runqueue etc associated with a task group */
8715 void sched_destroy_group(struct task_group *tg)
8716 {
8717 unsigned long flags;
8718 int i;
8719
8720 /* end participation in shares distribution */
8721 for_each_possible_cpu(i)
8722 unregister_fair_sched_group(tg, i);
8723
8724 spin_lock_irqsave(&task_group_lock, flags);
8725 list_del_rcu(&tg->list);
8726 list_del_rcu(&tg->siblings);
8727 spin_unlock_irqrestore(&task_group_lock, flags);
8728
8729 /* wait for possible concurrent references to cfs_rqs to complete */
8730 call_rcu(&tg->rcu, free_sched_group_rcu);
8731 }
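/*
 * An illustrative sketch (not code from this file) of how an in-kernel
 * user such as the cpu cgroup code further below drives this lifecycle;
 * "some_task" and the shares value are assumptions made for the example:
 *
 *	struct task_group *tg;
 *
 *	tg = sched_create_group(&root_task_group);
 *	if (IS_ERR(tg))
 *		return PTR_ERR(tg);
 *
 *	sched_group_set_shares(tg, scale_load(512));	// half the default 1024
 *	sched_move_task(some_task);	// after the task joined the new group
 *	...
 *	sched_destroy_group(tg);	// freed after an RCU grace period
 */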
8732
8733 /*
8734  * Change a task's runqueue when it moves between groups. The caller
8735  * should have put the task in its new group by now; this function just
8736  * updates tsk->se.cfs_rq and tsk->se.parent to reflect the new group.
8737  */
8738 void sched_move_task(struct task_struct *tsk)
8739 {
8740 int on_rq, running;
8741 unsigned long flags;
8742 struct rq *rq;
8743
8744 rq = task_rq_lock(tsk, &flags);
8745
8746 running = task_current(rq, tsk);
8747 on_rq = tsk->on_rq;
8748
8749 if (on_rq)
8750 dequeue_task(rq, tsk, 0);
8751 if (unlikely(running))
8752 tsk->sched_class->put_prev_task(rq, tsk);
8753
8754 #ifdef CONFIG_FAIR_GROUP_SCHED
8755 if (tsk->sched_class->task_move_group)
8756 tsk->sched_class->task_move_group(tsk, on_rq);
8757 else
8758 #endif
8759 set_task_rq(tsk, task_cpu(tsk));
8760
8761 if (unlikely(running))
8762 tsk->sched_class->set_curr_task(rq);
8763 if (on_rq)
8764 enqueue_task(rq, tsk, 0);
8765
8766 task_rq_unlock(rq, tsk, &flags);
8767 }
8768 #endif /* CONFIG_CGROUP_SCHED */
8769
8770 #ifdef CONFIG_FAIR_GROUP_SCHED
8771 static DEFINE_MUTEX(shares_mutex);
8772
8773 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8774 {
8775 int i;
8776 unsigned long flags;
8777
8778 /*
8779 * We can't change the weight of the root cgroup.
8780 */
8781 if (!tg->se[0])
8782 return -EINVAL;
8783
8784 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8785
8786 mutex_lock(&shares_mutex);
8787 if (tg->shares == shares)
8788 goto done;
8789
8790 tg->shares = shares;
8791 for_each_possible_cpu(i) {
8792 struct rq *rq = cpu_rq(i);
8793 struct sched_entity *se;
8794
8795 se = tg->se[i];
8796 /* Propagate contribution to hierarchy */
8797 raw_spin_lock_irqsave(&rq->lock, flags);
8798 for_each_sched_entity(se)
8799 update_cfs_shares(group_cfs_rq(se));
8800 raw_spin_unlock_irqrestore(&rq->lock, flags);
8801 }
8802
8803 done:
8804 mutex_unlock(&shares_mutex);
8805 return 0;
8806 }
8807
8808 unsigned long sched_group_shares(struct task_group *tg)
8809 {
8810 return tg->shares;
8811 }
8812 #endif
8813
8814 #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
8815 static unsigned long to_ratio(u64 period, u64 runtime)
8816 {
8817 if (runtime == RUNTIME_INF)
8818 return 1ULL << 20;
8819
8820 return div64_u64(runtime << 20, period);
8821 }
8822 #endif
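/*
 * Worked example for to_ratio(): ratios are Q20 fixed point, so 1.0 maps
 * to 1 << 20 == 1048576. With the default 1s RT period and 950ms runtime:
 *
 *	to_ratio(1000000000, 950000000)
 *		= (950000000 << 20) / 1000000000
 *		= 996147		(roughly 0.95 * 1048576)
 *
 * The admission checks below compare such Q20 ratios against each other
 * and against the global limit instead of raw nanosecond values.
 */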
8823
8824 #ifdef CONFIG_RT_GROUP_SCHED
8825 /*
8826 * Ensure that the real time constraints are schedulable.
8827 */
8828 static DEFINE_MUTEX(rt_constraints_mutex);
8829
8830 /* Must be called with tasklist_lock held */
8831 static inline int tg_has_rt_tasks(struct task_group *tg)
8832 {
8833 struct task_struct *g, *p;
8834
8835 do_each_thread(g, p) {
8836 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8837 return 1;
8838 } while_each_thread(g, p);
8839
8840 return 0;
8841 }
8842
8843 struct rt_schedulable_data {
8844 struct task_group *tg;
8845 u64 rt_period;
8846 u64 rt_runtime;
8847 };
8848
8849 static int tg_rt_schedulable(struct task_group *tg, void *data)
8850 {
8851 struct rt_schedulable_data *d = data;
8852 struct task_group *child;
8853 unsigned long total, sum = 0;
8854 u64 period, runtime;
8855
8856 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8857 runtime = tg->rt_bandwidth.rt_runtime;
8858
8859 if (tg == d->tg) {
8860 period = d->rt_period;
8861 runtime = d->rt_runtime;
8862 }
8863
8864 /*
8865 * Cannot have more runtime than the period.
8866 */
8867 if (runtime > period && runtime != RUNTIME_INF)
8868 return -EINVAL;
8869
8870 /*
8871 * Ensure we don't starve existing RT tasks.
8872 */
8873 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8874 return -EBUSY;
8875
8876 total = to_ratio(period, runtime);
8877
8878 /*
8879 * Nobody can have more than the global setting allows.
8880 */
8881 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8882 return -EINVAL;
8883
8884 /*
8885 * The sum of our children's runtime should not exceed our own.
8886 */
8887 list_for_each_entry_rcu(child, &tg->children, siblings) {
8888 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8889 runtime = child->rt_bandwidth.rt_runtime;
8890
8891 if (child == d->tg) {
8892 period = d->rt_period;
8893 runtime = d->rt_runtime;
8894 }
8895
8896 sum += to_ratio(period, runtime);
8897 }
8898
8899 if (sum > total)
8900 return -EINVAL;
8901
8902 return 0;
8903 }
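/*
 * Worked example of the check above (the numbers are hypothetical):
 * a group with rt_period = 1s and rt_runtime = 400ms has total ~= 0.4 in
 * Q20. If two of its children each request 300ms per 1s period:
 *
 *	sum = 2 * to_ratio(1s, 300ms) ~= 0.6 in Q20
 *
 * so sum > total, tg_rt_schedulable() returns -EINVAL and the write is
 * rejected: children can never be promised more RT bandwidth than their
 * parent holds.
 */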
8904
8905 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8906 {
8907 int ret;
8908
8909 struct rt_schedulable_data data = {
8910 .tg = tg,
8911 .rt_period = period,
8912 .rt_runtime = runtime,
8913 };
8914
8915 rcu_read_lock();
8916 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
8917 rcu_read_unlock();
8918
8919 return ret;
8920 }
8921
8922 static int tg_set_rt_bandwidth(struct task_group *tg,
8923 u64 rt_period, u64 rt_runtime)
8924 {
8925 int i, err = 0;
8926
8927 mutex_lock(&rt_constraints_mutex);
8928 read_lock(&tasklist_lock);
8929 err = __rt_schedulable(tg, rt_period, rt_runtime);
8930 if (err)
8931 goto unlock;
8932
8933 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8934 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8935 tg->rt_bandwidth.rt_runtime = rt_runtime;
8936
8937 for_each_possible_cpu(i) {
8938 struct rt_rq *rt_rq = tg->rt_rq[i];
8939
8940 raw_spin_lock(&rt_rq->rt_runtime_lock);
8941 rt_rq->rt_runtime = rt_runtime;
8942 raw_spin_unlock(&rt_rq->rt_runtime_lock);
8943 }
8944 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8945 unlock:
8946 read_unlock(&tasklist_lock);
8947 mutex_unlock(&rt_constraints_mutex);
8948
8949 return err;
8950 }
8951
8952 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8953 {
8954 u64 rt_runtime, rt_period;
8955
8956 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8957 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8958 if (rt_runtime_us < 0)
8959 rt_runtime = RUNTIME_INF;
8960
8961 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
8962 }
8963
8964 long sched_group_rt_runtime(struct task_group *tg)
8965 {
8966 u64 rt_runtime_us;
8967
8968 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
8969 return -1;
8970
8971 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
8972 do_div(rt_runtime_us, NSEC_PER_USEC);
8973 return rt_runtime_us;
8974 }
8975
8976 int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8977 {
8978 u64 rt_runtime, rt_period;
8979
8980 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8981 rt_runtime = tg->rt_bandwidth.rt_runtime;
8982
8983 if (rt_period == 0)
8984 return -EINVAL;
8985
8986 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
8987 }
8988
8989 long sched_group_rt_period(struct task_group *tg)
8990 {
8991 u64 rt_period_us;
8992
8993 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8994 do_div(rt_period_us, NSEC_PER_USEC);
8995 return rt_period_us;
8996 }
8997
8998 static int sched_rt_global_constraints(void)
8999 {
9000 u64 runtime, period;
9001 int ret = 0;
9002
9003 if (sysctl_sched_rt_period <= 0)
9004 return -EINVAL;
9005
9006 runtime = global_rt_runtime();
9007 period = global_rt_period();
9008
9009 /*
9010 * Sanity check on the sysctl variables.
9011 */
9012 if (runtime > period && runtime != RUNTIME_INF)
9013 return -EINVAL;
9014
9015 mutex_lock(&rt_constraints_mutex);
9016 read_lock(&tasklist_lock);
9017 ret = __rt_schedulable(NULL, 0, 0);
9018 read_unlock(&tasklist_lock);
9019 mutex_unlock(&rt_constraints_mutex);
9020
9021 return ret;
9022 }
9023
9024 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
9025 {
9026 /* Don't accept realtime tasks when there is no way for them to run */
9027 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
9028 return 0;
9029
9030 return 1;
9031 }
9032
9033 #else /* !CONFIG_RT_GROUP_SCHED */
9034 static int sched_rt_global_constraints(void)
9035 {
9036 unsigned long flags;
9037 int i;
9038
9039 if (sysctl_sched_rt_period <= 0)
9040 return -EINVAL;
9041
9042 /*
9043 * There are always some RT tasks in the root group
9044 * -- migration, kstopmachine, etc.
9045 */
9046 if (sysctl_sched_rt_runtime == 0)
9047 return -EBUSY;
9048
9049 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
9050 for_each_possible_cpu(i) {
9051 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
9052
9053 raw_spin_lock(&rt_rq->rt_runtime_lock);
9054 rt_rq->rt_runtime = global_rt_runtime();
9055 raw_spin_unlock(&rt_rq->rt_runtime_lock);
9056 }
9057 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
9058
9059 return 0;
9060 }
9061 #endif /* CONFIG_RT_GROUP_SCHED */
9062
9063 int sched_rt_handler(struct ctl_table *table, int write,
9064 void __user *buffer, size_t *lenp,
9065 loff_t *ppos)
9066 {
9067 int ret;
9068 int old_period, old_runtime;
9069 static DEFINE_MUTEX(mutex);
9070
9071 mutex_lock(&mutex);
9072 old_period = sysctl_sched_rt_period;
9073 old_runtime = sysctl_sched_rt_runtime;
9074
9075 ret = proc_dointvec(table, write, buffer, lenp, ppos);
9076
9077 if (!ret && write) {
9078 ret = sched_rt_global_constraints();
9079 if (ret) {
9080 sysctl_sched_rt_period = old_period;
9081 sysctl_sched_rt_runtime = old_runtime;
9082 } else {
9083 def_rt_bandwidth.rt_runtime = global_rt_runtime();
9084 def_rt_bandwidth.rt_period =
9085 ns_to_ktime(global_rt_period());
9086 }
9087 }
9088 mutex_unlock(&mutex);
9089
9090 return ret;
9091 }
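/*
 * Usage note: this handler sits behind the sched_rt_period_us and
 * sched_rt_runtime_us sysctls (normally visible under /proc/sys/kernel/).
 * A successful write re-runs sched_rt_global_constraints() and refreshes
 * def_rt_bandwidth; writing -1 to sched_rt_runtime_us selects RUNTIME_INF,
 * i.e. no global RT throttling.
 */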
9092
9093 #ifdef CONFIG_CGROUP_SCHED
9094
9095 /* return corresponding task_group object of a cgroup */
9096 static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
9097 {
9098 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
9099 struct task_group, css);
9100 }
9101
9102 static struct cgroup_subsys_state *
9103 cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
9104 {
9105 struct task_group *tg, *parent;
9106
9107 if (!cgrp->parent) {
9108 /* This is early initialization for the top cgroup */
9109 return &root_task_group.css;
9110 }
9111
9112 parent = cgroup_tg(cgrp->parent);
9113 tg = sched_create_group(parent);
9114 if (IS_ERR(tg))
9115 return ERR_PTR(-ENOMEM);
9116
9117 return &tg->css;
9118 }
9119
9120 static void
9121 cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
9122 {
9123 struct task_group *tg = cgroup_tg(cgrp);
9124
9125 sched_destroy_group(tg);
9126 }
9127
9128 static int
9129 cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
9130 {
9131 #ifdef CONFIG_RT_GROUP_SCHED
9132 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
9133 return -EINVAL;
9134 #else
9135 /* We don't support RT-tasks being in separate groups */
9136 if (tsk->sched_class != &fair_sched_class)
9137 return -EINVAL;
9138 #endif
9139 return 0;
9140 }
9141
9142 static void
9143 cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
9144 {
9145 sched_move_task(tsk);
9146 }
9147
9148 static void
9149 cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
9150 struct cgroup *old_cgrp, struct task_struct *task)
9151 {
9152 /*
9153 * cgroup_exit() is called in the copy_process() failure path.
9154 * Ignore this case since the task hasn't run yet; this avoids
9155 * trying to poke a half freed task state from generic code.
9156 */
9157 if (!(task->flags & PF_EXITING))
9158 return;
9159
9160 sched_move_task(task);
9161 }
9162
9163 #ifdef CONFIG_FAIR_GROUP_SCHED
9164 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
9165 u64 shareval)
9166 {
9167 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
9168 }
9169
9170 static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
9171 {
9172 struct task_group *tg = cgroup_tg(cgrp);
9173
9174 return (u64) scale_load_down(tg->shares);
9175 }
9176
9177 #ifdef CONFIG_CFS_BANDWIDTH
9178 static DEFINE_MUTEX(cfs_constraints_mutex);
9179
9180 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
9181 const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
9182
9183 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9184
9185 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
9186 {
9187 int i, ret = 0, runtime_enabled;
9188 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
9189
9190 if (tg == &root_task_group)
9191 return -EINVAL;
9192
9193 /*
9194 * Ensure we have at least some amount of bandwidth every period. This is
9195 * to prevent reaching a state of large arrears when throttled via
9196 * entity_tick() resulting in prolonged exit starvation.
9197 */
9198 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9199 return -EINVAL;
9200
9201 /*
9202 * Likewise, bound things on the other side by preventing insane quota
9203 * periods. This also allows us to normalize in computing quota
9204 * feasibility.
9205 */
9206 if (period > max_cfs_quota_period)
9207 return -EINVAL;
9208
9209 mutex_lock(&cfs_constraints_mutex);
9210 ret = __cfs_schedulable(tg, period, quota);
9211 if (ret)
9212 goto out_unlock;
9213
9214 runtime_enabled = quota != RUNTIME_INF;
9215 raw_spin_lock_irq(&cfs_b->lock);
9216 cfs_b->period = ns_to_ktime(period);
9217 cfs_b->quota = quota;
9218
9219 __refill_cfs_bandwidth_runtime(cfs_b);
9220 /* restart the period timer (if active) to handle new period expiry */
9221 if (runtime_enabled && cfs_b->timer_active) {
9222 /* force a reprogram */
9223 cfs_b->timer_active = 0;
9224 __start_cfs_bandwidth(cfs_b);
9225 }
9226 raw_spin_unlock_irq(&cfs_b->lock);
9227
9228 for_each_possible_cpu(i) {
9229 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9230 struct rq *rq = rq_of(cfs_rq);
9231
9232 raw_spin_lock_irq(&rq->lock);
9233 cfs_rq->runtime_enabled = runtime_enabled;
9234 cfs_rq->runtime_remaining = 0;
9235
9236 if (cfs_rq_throttled(cfs_rq))
9237 unthrottle_cfs_rq(cfs_rq);
9238 raw_spin_unlock_irq(&rq->lock);
9239 }
9240 out_unlock:
9241 mutex_unlock(&cfs_constraints_mutex);
9242
9243 return ret;
9244 }
9245
9246 int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9247 {
9248 u64 quota, period;
9249
9250 period = ktime_to_ns(tg_cfs_bandwidth(tg)->period);
9251 if (cfs_quota_us < 0)
9252 quota = RUNTIME_INF;
9253 else
9254 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9255
9256 return tg_set_cfs_bandwidth(tg, period, quota);
9257 }
9258
9259 long tg_get_cfs_quota(struct task_group *tg)
9260 {
9261 u64 quota_us;
9262
9263 if (tg_cfs_bandwidth(tg)->quota == RUNTIME_INF)
9264 return -1;
9265
9266 quota_us = tg_cfs_bandwidth(tg)->quota;
9267 do_div(quota_us, NSEC_PER_USEC);
9268
9269 return quota_us;
9270 }
9271
9272 int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9273 {
9274 u64 quota, period;
9275
9276 period = (u64)cfs_period_us * NSEC_PER_USEC;
9277 quota = tg_cfs_bandwidth(tg)->quota;
9278
9279 if (period <= 0)
9280 return -EINVAL;
9281
9282 return tg_set_cfs_bandwidth(tg, period, quota);
9283 }
9284
9285 long tg_get_cfs_period(struct task_group *tg)
9286 {
9287 u64 cfs_period_us;
9288
9289 cfs_period_us = ktime_to_ns(tg_cfs_bandwidth(tg)->period);
9290 do_div(cfs_period_us, NSEC_PER_USEC);
9291
9292 return cfs_period_us;
9293 }
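/*
 * Worked example for the setters above (values chosen for illustration):
 * tg_set_cfs_period(tg, 100000) followed by tg_set_cfs_quota(tg, 50000)
 * grants the group 50ms of runtime per 100ms period, so the group as a
 * whole -- summed over all CPUs -- is capped at roughly half a CPU.
 * tg_set_cfs_quota(tg, -1) maps to RUNTIME_INF and removes the cap again.
 */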
9294
9295 static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
9296 {
9297 return tg_get_cfs_quota(cgroup_tg(cgrp));
9298 }
9299
9300 static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
9301 s64 cfs_quota_us)
9302 {
9303 return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
9304 }
9305
9306 static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
9307 {
9308 return tg_get_cfs_period(cgroup_tg(cgrp));
9309 }
9310
9311 static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
9312 u64 cfs_period_us)
9313 {
9314 return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
9315 }
9316
9317 struct cfs_schedulable_data {
9318 struct task_group *tg;
9319 u64 period, quota;
9320 };
9321
9322 /*
9323 * normalize group quota/period to be quota/max_period
9324 * note: units are usecs
9325 */
9326 static u64 normalize_cfs_quota(struct task_group *tg,
9327 struct cfs_schedulable_data *d)
9328 {
9329 u64 quota, period;
9330
9331 if (tg == d->tg) {
9332 period = d->period;
9333 quota = d->quota;
9334 } else {
9335 period = tg_get_cfs_period(tg);
9336 quota = tg_get_cfs_quota(tg);
9337 }
9338
9339 /* note: these should typically be equivalent */
9340 if (quota == RUNTIME_INF || quota == -1)
9341 return RUNTIME_INF;
9342
9343 return to_ratio(period, quota);
9344 }
9345
9346 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9347 {
9348 struct cfs_schedulable_data *d = data;
9349 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
9350 s64 quota = 0, parent_quota = -1;
9351
9352 if (!tg->parent) {
9353 quota = RUNTIME_INF;
9354 } else {
9355 struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent);
9356
9357 quota = normalize_cfs_quota(tg, d);
9358 parent_quota = parent_b->hierarchal_quota;
9359
9360 /*
9361 * ensure max(child_quota) <= parent_quota, inherit when no
9362 * limit is set
9363 */
9364 if (quota == RUNTIME_INF)
9365 quota = parent_quota;
9366 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9367 return -EINVAL;
9368 }
9369 cfs_b->hierarchal_quota = quota;
9370
9371 return 0;
9372 }
9373
9374 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9375 {
9376 int ret;
9377 struct cfs_schedulable_data data = {
9378 .tg = tg,
9379 .period = period,
9380 .quota = quota,
9381 };
9382
9383 if (quota != RUNTIME_INF) {
9384 do_div(data.period, NSEC_PER_USEC);
9385 do_div(data.quota, NSEC_PER_USEC);
9386 }
9387
9388 rcu_read_lock();
9389 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9390 rcu_read_unlock();
9391
9392 return ret;
9393 }
9394
9395 static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
9396 struct cgroup_map_cb *cb)
9397 {
9398 struct task_group *tg = cgroup_tg(cgrp);
9399 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
9400
9401 cb->fill(cb, "nr_periods", cfs_b->nr_periods);
9402 cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
9403 cb->fill(cb, "throttled_time", cfs_b->throttled_time);
9404
9405 return 0;
9406 }
9407 #endif /* CONFIG_CFS_BANDWIDTH */
9408 #endif /* CONFIG_FAIR_GROUP_SCHED */
9409
9410 #ifdef CONFIG_RT_GROUP_SCHED
9411 static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
9412 s64 val)
9413 {
9414 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
9415 }
9416
9417 static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
9418 {
9419 return sched_group_rt_runtime(cgroup_tg(cgrp));
9420 }
9421
9422 static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
9423 u64 rt_period_us)
9424 {
9425 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
9426 }
9427
9428 static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
9429 {
9430 return sched_group_rt_period(cgroup_tg(cgrp));
9431 }
9432 #endif /* CONFIG_RT_GROUP_SCHED */
9433
9434 static struct cftype cpu_files[] = {
9435 #ifdef CONFIG_FAIR_GROUP_SCHED
9436 {
9437 .name = "shares",
9438 .read_u64 = cpu_shares_read_u64,
9439 .write_u64 = cpu_shares_write_u64,
9440 },
9441 #endif
9442 #ifdef CONFIG_CFS_BANDWIDTH
9443 {
9444 .name = "cfs_quota_us",
9445 .read_s64 = cpu_cfs_quota_read_s64,
9446 .write_s64 = cpu_cfs_quota_write_s64,
9447 },
9448 {
9449 .name = "cfs_period_us",
9450 .read_u64 = cpu_cfs_period_read_u64,
9451 .write_u64 = cpu_cfs_period_write_u64,
9452 },
9453 {
9454 .name = "stat",
9455 .read_map = cpu_stats_show,
9456 },
9457 #endif
9458 #ifdef CONFIG_RT_GROUP_SCHED
9459 {
9460 .name = "rt_runtime_us",
9461 .read_s64 = cpu_rt_runtime_read,
9462 .write_s64 = cpu_rt_runtime_write,
9463 },
9464 {
9465 .name = "rt_period_us",
9466 .read_u64 = cpu_rt_period_read_uint,
9467 .write_u64 = cpu_rt_period_write_uint,
9468 },
9469 #endif
9470 };
9471
9472 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
9473 {
9474 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
9475 }
9476
9477 struct cgroup_subsys cpu_cgroup_subsys = {
9478 .name = "cpu",
9479 .create = cpu_cgroup_create,
9480 .destroy = cpu_cgroup_destroy,
9481 .can_attach_task = cpu_cgroup_can_attach_task,
9482 .attach_task = cpu_cgroup_attach_task,
9483 .exit = cpu_cgroup_exit,
9484 .populate = cpu_cgroup_populate,
9485 .subsys_id = cpu_cgroup_subsys_id,
9486 .early_init = 1,
9487 };
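/*
 * Illustrative userspace sketch (error handling elided; the mount point is
 * an assumption): once the "cpu" controller is mounted as a cgroup
 * hierarchy, the cftype entries above appear as cpu.shares,
 * cpu.cfs_quota_us, cpu.cfs_period_us, cpu.stat, cpu.rt_runtime_us and
 * cpu.rt_period_us in every group directory, and cpu_cgroup_attach_task()
 * runs when a pid is written to the group's tasks file.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/stat.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static void put_pid_in_lowprio_group(pid_t pid)
 *	{
 *		char buf[16];
 *		int fd;
 *
 *		mkdir("/sys/fs/cgroup/cpu/lowprio", 0755);
 *
 *		fd = open("/sys/fs/cgroup/cpu/lowprio/cpu.shares", O_WRONLY);
 *		write(fd, "512", 3);		// half the default weight
 *		close(fd);
 *
 *		fd = open("/sys/fs/cgroup/cpu/lowprio/tasks", O_WRONLY);
 *		snprintf(buf, sizeof(buf), "%d\n", pid);
 *		write(fd, buf, strlen(buf));
 *		close(fd);
 *	}
 */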
9488
9489 #endif /* CONFIG_CGROUP_SCHED */
9490
9491 #ifdef CONFIG_CGROUP_CPUACCT
9492
9493 /*
9494 * CPU accounting code for task groups.
9495 *
9496 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
9497 * (balbir@in.ibm.com).
9498 */
9499
9500 /* track cpu usage of a group of tasks and its child groups */
9501 struct cpuacct {
9502 struct cgroup_subsys_state css;
9503 /* cpuusage holds pointer to a u64-type object on every cpu */
9504 u64 __percpu *cpuusage;
9505 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
9506 struct cpuacct *parent;
9507 };
9508
9509 struct cgroup_subsys cpuacct_subsys;
9510
9511 /* return cpu accounting group corresponding to this container */
9512 static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
9513 {
9514 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
9515 struct cpuacct, css);
9516 }
9517
9518 /* return cpu accounting group to which this task belongs */
9519 static inline struct cpuacct *task_ca(struct task_struct *tsk)
9520 {
9521 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
9522 struct cpuacct, css);
9523 }
9524
9525 /* create a new cpu accounting group */
9526 static struct cgroup_subsys_state *cpuacct_create(
9527 struct cgroup_subsys *ss, struct cgroup *cgrp)
9528 {
9529 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
9530 int i;
9531
9532 if (!ca)
9533 goto out;
9534
9535 ca->cpuusage = alloc_percpu(u64);
9536 if (!ca->cpuusage)
9537 goto out_free_ca;
9538
9539 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9540 if (percpu_counter_init(&ca->cpustat[i], 0))
9541 goto out_free_counters;
9542
9543 if (cgrp->parent)
9544 ca->parent = cgroup_ca(cgrp->parent);
9545
9546 return &ca->css;
9547
9548 out_free_counters:
9549 while (--i >= 0)
9550 percpu_counter_destroy(&ca->cpustat[i]);
9551 free_percpu(ca->cpuusage);
9552 out_free_ca:
9553 kfree(ca);
9554 out:
9555 return ERR_PTR(-ENOMEM);
9556 }
9557
9558 /* destroy an existing cpu accounting group */
9559 static void
9560 cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
9561 {
9562 struct cpuacct *ca = cgroup_ca(cgrp);
9563 int i;
9564
9565 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9566 percpu_counter_destroy(&ca->cpustat[i]);
9567 free_percpu(ca->cpuusage);
9568 kfree(ca);
9569 }
9570
9571 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9572 {
9573 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9574 u64 data;
9575
9576 #ifndef CONFIG_64BIT
9577 /*
9578 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
9579 */
9580 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
9581 data = *cpuusage;
9582 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
9583 #else
9584 data = *cpuusage;
9585 #endif
9586
9587 return data;
9588 }
9589
9590 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9591 {
9592 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9593
9594 #ifndef CONFIG_64BIT
9595 /*
9596 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
9597 */
9598 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
9599 *cpuusage = val;
9600 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
9601 #else
9602 *cpuusage = val;
9603 #endif
9604 }
9605
9606 /* return total cpu usage (in nanoseconds) of a group */
9607 static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
9608 {
9609 struct cpuacct *ca = cgroup_ca(cgrp);
9610 u64 totalcpuusage = 0;
9611 int i;
9612
9613 for_each_present_cpu(i)
9614 totalcpuusage += cpuacct_cpuusage_read(ca, i);
9615
9616 return totalcpuusage;
9617 }
9618
9619 static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
9620 u64 reset)
9621 {
9622 struct cpuacct *ca = cgroup_ca(cgrp);
9623 int err = 0;
9624 int i;
9625
9626 if (reset) {
9627 err = -EINVAL;
9628 goto out;
9629 }
9630
9631 for_each_present_cpu(i)
9632 cpuacct_cpuusage_write(ca, i, 0);
9633
9634 out:
9635 return err;
9636 }
9637
9638 static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
9639 struct seq_file *m)
9640 {
9641 struct cpuacct *ca = cgroup_ca(cgroup);
9642 u64 percpu;
9643 int i;
9644
9645 for_each_present_cpu(i) {
9646 percpu = cpuacct_cpuusage_read(ca, i);
9647 seq_printf(m, "%llu ", (unsigned long long) percpu);
9648 }
9649 seq_printf(m, "\n");
9650 return 0;
9651 }
9652
9653 static const char *cpuacct_stat_desc[] = {
9654 [CPUACCT_STAT_USER] = "user",
9655 [CPUACCT_STAT_SYSTEM] = "system",
9656 };
9657
9658 static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9659 struct cgroup_map_cb *cb)
9660 {
9661 struct cpuacct *ca = cgroup_ca(cgrp);
9662 int i;
9663
9664 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
9665 s64 val = percpu_counter_read(&ca->cpustat[i]);
9666 val = cputime64_to_clock_t(val);
9667 cb->fill(cb, cpuacct_stat_desc[i], val);
9668 }
9669 return 0;
9670 }
9671
9672 static struct cftype files[] = {
9673 {
9674 .name = "usage",
9675 .read_u64 = cpuusage_read,
9676 .write_u64 = cpuusage_write,
9677 },
9678 {
9679 .name = "usage_percpu",
9680 .read_seq_string = cpuacct_percpu_seq_read,
9681 },
9682 {
9683 .name = "stat",
9684 .read_map = cpuacct_stats_show,
9685 },
9686 };
9687
9688 static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
9689 {
9690 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
9691 }
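/*
 * Usage note: with a cpuacct hierarchy mounted, the files above surface
 * per-group accounting as cpuacct.usage (total nanoseconds, writable only
 * with "0" to reset), cpuacct.usage_percpu (one nanosecond counter per
 * present CPU) and cpuacct.stat (user/system time in clock ticks), all fed
 * by cpuacct_charge() and cpuacct_update_stats() below.
 */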
9692
9693 /*
9694 * charge this task's execution time to its accounting group.
9695 *
9696 * called with rq->lock held.
9697 */
9698 static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9699 {
9700 struct cpuacct *ca;
9701 int cpu;
9702
9703 if (unlikely(!cpuacct_subsys.active))
9704 return;
9705
9706 cpu = task_cpu(tsk);
9707
9708 rcu_read_lock();
9709
9710 ca = task_ca(tsk);
9711
9712 for (; ca; ca = ca->parent) {
9713 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9714 *cpuusage += cputime;
9715 }
9716
9717 rcu_read_unlock();
9718 }
9719
9720 /*
9721 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
9722 * in cputime_t units. As a result, cpuacct_update_stats calls
9723 * percpu_counter_add with values large enough to always overflow the
9724 * per-cpu batch limit, causing bad SMP scalability.
9725 *
9726 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9727 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9728 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
9729 */
9730 #ifdef CONFIG_SMP
9731 #define CPUACCT_BATCH \
9732 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9733 #else
9734 #define CPUACCT_BATCH 0
9735 #endif
9736
9737 /*
9738 * Charge the system/user time to the task's accounting group.
9739 */
9740 static void cpuacct_update_stats(struct task_struct *tsk,
9741 enum cpuacct_stat_index idx, cputime_t val)
9742 {
9743 struct cpuacct *ca;
9744 int batch = CPUACCT_BATCH;
9745
9746 if (unlikely(!cpuacct_subsys.active))
9747 return;
9748
9749 rcu_read_lock();
9750 ca = task_ca(tsk);
9751
9752 do {
9753 __percpu_counter_add(&ca->cpustat[idx], val, batch);
9754 ca = ca->parent;
9755 } while (ca);
9756 rcu_read_unlock();
9757 }
9758
9759 struct cgroup_subsys cpuacct_subsys = {
9760 .name = "cpuacct",
9761 .create = cpuacct_create,
9762 .destroy = cpuacct_destroy,
9763 .populate = cpuacct_populate,
9764 .subsys_id = cpuacct_subsys_id,
9765 };
9766 #endif /* CONFIG_CGROUP_CPUACCT */