/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
}
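
/*
 * Illustrative sketch (not part of this file): the bookkeeping done by
 * update_curr_rt() above -- charge the wall-clock time since exec_start to
 * the task, clamp a backwards clock to zero, and restart the accounting
 * window.  The demo_* names and the standalone userspace form are
 * assumptions made purely for the example.
 */
#include <stdio.h>

typedef unsigned long long demo_u64;
typedef long long demo_s64;

struct demo_se {
        demo_u64 exec_start;            /* when the current run window began */
        demo_u64 sum_exec_runtime;      /* total runtime charged so far */
};

static void demo_update_curr(struct demo_se *se, demo_u64 now)
{
        demo_u64 delta_exec = now - se->exec_start;

        /* clamp a backwards clock to zero, as update_curr_rt() does */
        if ((demo_s64)delta_exec < 0)
                delta_exec = 0;

        se->sum_exec_runtime += delta_exec;
        se->exec_start = now;           /* start the next accounting window */
}

int main(void)
{
        struct demo_se se = { 1000, 0 };

        demo_update_curr(&se, 1250);    /* charges 250 */
        demo_update_curr(&se, 1200);    /* clock went backwards: charges 0 */
        printf("%llu\n", se.sum_exec_runtime);  /* prints 250 */
        return 0;
}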

static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

static void
yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}
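
/*
 * Illustrative sketch (not part of this file): a minimal userspace model of
 * the rt_prio_array used by enqueue_task_rt()/dequeue_task_rt()/
 * pick_next_task_rt() above -- one FIFO queue per priority plus a bitmap of
 * non-empty priorities.  The demo_* names, the array-based queues, and the
 * small sizes are assumptions for the example; unlike pick_next_task_rt(),
 * which only peeks at the head, demo_pick_next() also dequeues for brevity,
 * and the kernel scans the bitmap with sched_find_first_bit() rather than a
 * loop over one word.
 */
#include <stdio.h>

#define DEMO_MAX_RT_PRIO        8       /* kernel uses MAX_RT_PRIO */
#define DEMO_QUEUE_LEN          16

static int demo_queue[DEMO_MAX_RT_PRIO][DEMO_QUEUE_LEN];
static int demo_queue_len[DEMO_MAX_RT_PRIO];
static unsigned int demo_bitmap;        /* bit i set => queue i non-empty */

/* loosely mirrors enqueue_task_rt(): append to the tail, set the bit */
static void demo_enqueue(int pid, int prio)
{
        demo_queue[prio][demo_queue_len[prio]++] = pid;
        demo_bitmap |= 1u << prio;
}

/* loosely mirrors dequeue_task_rt(): remove the head, clear the bit if empty */
static int demo_dequeue_head(int prio)
{
        int pid = demo_queue[prio][0];
        int i;

        for (i = 1; i < demo_queue_len[prio]; i++)
                demo_queue[prio][i - 1] = demo_queue[prio][i];
        if (--demo_queue_len[prio] == 0)
                demo_bitmap &= ~(1u << prio);
        return pid;
}

/* loosely mirrors pick_next_task_rt(): lowest set bit == highest priority */
static int demo_pick_next(void)
{
        int prio;

        for (prio = 0; prio < DEMO_MAX_RT_PRIO; prio++)
                if (demo_bitmap & (1u << prio))
                        return demo_dequeue_head(prio);
        return -1;                      /* no runnable RT task */
}

int main(void)
{
        demo_enqueue(101, 3);           /* pid 101 at priority 3 */
        demo_enqueue(102, 3);           /* same priority: FIFO order behind 101 */
        demo_enqueue(103, 1);           /* higher priority (lower number) */

        printf("%d\n", demo_pick_next());       /* 103 */
        printf("%d\n", demo_pick_next());       /* 101 */
        printf("%d\n", demo_pick_next());       /* 102 */
        return 0;
}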

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        head = array->queue + idx;
        curr = head->prev;

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_idx = idx;
        rq->rt.rt_load_balance_head = head;
        rq->rt.rt_load_balance_curr = curr;

        return p;
}

static struct task_struct *load_balance_next_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = rq->rt.rt_load_balance_idx;
        head = rq->rt.rt_load_balance_head;
        curr = rq->rt.rt_load_balance_curr;

        /*
         * If we arrived back to the head again then
         * iterate to the next queue (if any):
         */
        if (unlikely(head == curr)) {
                int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

                if (next_idx >= MAX_RT_PRIO)
                        return NULL;

                idx = next_idx;
                head = array->queue + idx;
                curr = head->prev;

                rq->rt.rt_load_balance_idx = idx;
                rq->rt.rt_load_balance_head = head;
        }

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_curr = curr;

        return p;
}
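
/*
 * Illustrative sketch (not part of this file): why the iterators above
 * pre-iterate.  The saved cursor is advanced *before* the current element is
 * returned, so the caller may unlink the returned element (as the balancer
 * does when it migrates a task) without invalidating the cursor.  The
 * demo_* names are assumptions, and this sketch walks forward via ->next on a
 * singly linked list, whereas the kernel iterator walks the run list
 * backwards via ->prev; the pre-iteration trick is the same.
 */
#include <stdio.h>

struct demo_node {
        int val;
        struct demo_node *next;
};

static struct demo_node *demo_cursor;   /* mirrors rq->rt.rt_load_balance_curr */

/* return the element the cursor points at, advancing the cursor first */
static struct demo_node *demo_next(void)
{
        struct demo_node *p = demo_cursor;

        if (p)
                demo_cursor = p->next;  /* pre-iterate before returning p */
        return p;
}

int main(void)
{
        struct demo_node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct demo_node *p;

        demo_cursor = &a;
        while ((p = demo_next())) {
                /* it is now safe to unlink p: the cursor has already moved on */
                printf("%d\n", p->val);
        }
        return 0;
}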

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_nr_move, unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        int nr_moved;
        struct rq_iterator rt_rq_iterator;
        unsigned long load_moved;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        /* pass 'busiest' rq argument into
         * load_balance_[start|next]_rt iterators
         */
        rt_rq_iterator.arg = busiest;

        nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
                        max_load_move, sd, idle, all_pinned, &load_moved,
                        this_best_prio, &rt_rq_iterator);

        return load_moved;
}

static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}
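
/*
 * Illustrative sketch (not part of this file): the round-robin timeslice
 * handling in task_tick_rt() above, reduced to one task at one priority.
 * DEMO_TIMESLICE is an arbitrary stand-in for DEF_TIMESLICE (defined by the
 * core scheduler), and the demo_* names are assumptions for the example.
 */
#include <stdio.h>

#define DEMO_TIMESLICE 3

struct demo_task {
        const char *name;
        int time_slice;
};

/* mirrors task_tick_rt() for a SCHED_RR task: returns 1 when the task should
 * be requeued to the tail of its priority queue and rescheduled */
static int demo_tick(struct demo_task *p, int queue_has_others)
{
        if (--p->time_slice)
                return 0;                       /* slice not yet exhausted */

        p->time_slice = DEMO_TIMESLICE;         /* refill, like DEF_TIMESLICE */
        return queue_has_others;                /* requeue only if not alone */
}

int main(void)
{
        struct demo_task t = { "rr-task", DEMO_TIMESLICE };
        int tick;

        for (tick = 1; tick <= 7; tick++)
                if (demo_tick(&t, 1))
                        printf("tick %d: requeue %s to tail\n", tick, t.name);
        return 0;                               /* requeues at ticks 3 and 6 */
}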

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

        .load_balance           = load_balance_rt,

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};
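
/*
 * Illustrative sketch (not part of this file): the sched_class table above is
 * a plain C "vtable".  The core scheduler dispatches through these function
 * pointers, and the classes are linked in priority order via .next, so the
 * RT class is consulted before the fair class when picking the next task.
 * A minimal userspace model of that dispatch; the demo_* names and the pid
 * return convention are assumptions for the example.
 */
#include <stdio.h>

struct demo_class {
        const struct demo_class *next;  /* classes linked in priority order */
        int (*pick_next)(void);         /* return a pid, or -1 if none runnable */
};

static int demo_rt_pick(void)   { return -1; }  /* no runnable RT task here */
static int demo_fair_pick(void) { return 42; }

static const struct demo_class demo_fair_class = { NULL, demo_fair_pick };
static const struct demo_class demo_rt_class   = { &demo_fair_class, demo_rt_pick };

int main(void)
{
        const struct demo_class *class;
        int pid = -1;

        /* ask the highest-priority class first, fall through to the next */
        for (class = &demo_rt_class; class; class = class->next) {
                pid = class->pick_next();
                if (pid >= 0)
                        break;
        }
        printf("picked pid %d\n", pid);         /* 42, from the fair class */
        return 0;
}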