Merge tag 'please-pull-pstore' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl...
[deliverable/linux.git] / kernel / sched / idle_task.c
CommitLineData
029632fb
PZ
1#include "sched.h"
2
fa72e9e4
IM
3/*
4 * idle-task scheduling class.
5 *
6 * (NOTE: these are not related to SCHED_IDLE tasks which are
489a71b0 7 * handled in sched/fair.c)
fa72e9e4
IM
8 */
9
#ifdef CONFIG_SMP
/*
 * The per-CPU idle task always runs on its own CPU: just report the
 * task's current CPU as the target runqueue.
 */
static int
select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
642dbc39
VG
16
/*
 * Called before switching to the idle task (wired up as .pre_schedule
 * below, SMP only).
 *
 * NOTE(review): idle_exit_fair() presumably undoes the CFS-side
 * accounting added by idle_enter_fair() in post_schedule_idle() —
 * confirm against sched/fair.c.
 */
static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
{
	idle_exit_fair(rq);
	rq_last_tick_reset(rq);	/* presumably NO_HZ tick bookkeeping — verify */
}
22
/*
 * Runs after the switch to the idle task has completed; triggered by
 * pick_next_task_idle() setting rq->post_schedule (SMP only).
 */
static void post_schedule_idle(struct rq *rq)
{
	idle_enter_fair(rq);
}
#endif /* CONFIG_SMP */
fa72e9e4
IM
/*
 * Idle tasks are unconditionally rescheduled: any runnable task on
 * this runqueue should preempt the idle thread immediately.
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_task(rq->idle);
}
35
/*
 * Pick the next task for this class: always the per-CPU idle thread.
 * Also bumps the sched_goidle schedstat counter for each transition
 * into idle.
 */
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
#ifdef CONFIG_SMP
	/* Trigger the post schedule to do an idle_enter for CFS */
	rq->post_schedule = 1;
#endif
	return rq->idle;
}
45
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 *
 * The rq lock is dropped around the printk/dump_stack and retaken
 * afterwards; the caller holds rq->lock with IRQs disabled.
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}
58
/* The idle task carries no class state to save when it is switched out. */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
62
/* No per-tick accounting is done for the idle task. */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
66
83b699ed
SV
/* Nothing to do when the idle task becomes rq->curr for this class. */
static void set_curr_task_idle(struct rq *rq)
{
}
70
/*
 * A task must never be switched into the idle scheduling class;
 * reaching here is a kernel bug.
 */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}
75
da7a735e
PZ
/*
 * The idle task's priority must never change; reaching here is a
 * kernel bug.
 */
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}
81
/* The idle task has no round-robin timeslice: report 0. */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
86
fa72e9e4
IM
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 *
 * Only the hooks that are meaningful for the idle thread are filled
 * in; enqueue/yield are deliberately absent and dequeue only exists
 * to report a bug.
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.pre_schedule		= pre_schedule_idle,
	.post_schedule		= post_schedule_idle,
#endif

	.set_curr_task          = set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
};
This page took 0.352163 seconds and 5 git commands to generate.