/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
}

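/*
 * Add the task to the tail of the run list for its priority and mark
 * that priority as populated in the bitmap:
 */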
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
}

/*
 * Put the task at the end of its run list without the overhead of a
 * dequeue followed by an enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

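/*
 * Yield the CPU by moving the current task behind any other tasks
 * queued at the same priority:
 */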
static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

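/*
 * Pick the highest-priority runnable RT task: the head of the first
 * non-empty queue found in the priority bitmap. Also stamp exec_start
 * so update_curr_rt() can account its runtime later:
 */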
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

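/*
 * Fold the outgoing task's runtime into its statistics before it is
 * switched away from:
 */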
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	head = array->queue + idx;
	curr = head->prev;

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_idx = idx;
	rq->rt.rt_load_balance_head = head;
	rq->rt.rt_load_balance_curr = curr;

	return p;
}

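/*
 * Return the task saved by the previous step and pre-iterate to its
 * predecessor, moving on to the next populated priority queue once
 * the current one is exhausted:
 */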
static struct task_struct *load_balance_next_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = rq->rt.rt_load_balance_idx;
	head = rq->rt.rt_load_balance_head;
	curr = rq->rt.rt_load_balance_curr;

	/*
	 * If we arrived back at the head again then
	 * iterate to the next queue (if any):
	 */
	if (unlikely(head == curr)) {
		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

		if (next_idx >= MAX_RT_PRIO)
			return NULL;

		idx = next_idx;
		head = array->queue + idx;
		curr = head->prev;

		rq->rt.rt_load_balance_idx = idx;
		rq->rt.rt_load_balance_head = head;
	}

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_curr = curr;

	return p;
}

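/*
 * Walk the busiest runqueue's RT tasks with the iterators above and
 * let the generic balance_tasks() pull up to max_load_move worth of
 * load over to this runqueue:
 */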
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	struct rq_iterator rt_rq_iterator;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	/*
	 * Pass the 'busiest' rq argument into the
	 * load_balance_[start|next]_rt iterators:
	 */
	rt_rq_iterator.arg = busiest;

	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
			     idle, all_pinned, this_best_prio, &rt_rq_iterator);
}

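/*
 * Same iterator setup as load_balance_rt(), but try to move a single
 * task only:
 */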
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct rq_iterator rt_rq_iterator;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	rt_rq_iterator.arg = busiest;

	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				  &rt_rq_iterator);
}
#endif

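/*
 * Per-tick housekeeping: round-robin timeslice rotation for SCHED_RR
 * tasks; SCHED_FIFO tasks have no timeslice to manage:
 */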
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of the queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

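/*
 * Restart the runtime-accounting window for the task that is now
 * rq->curr in this class:
 */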
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

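/*
 * Method table for the RT scheduling class. The .next link puts the
 * fair class after this one, so RT tasks are always considered first
 * when picking the next task to run:
 */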
const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,
};