/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
        idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
        if (enable) {
                cpu_idle_force_poll++;
        } else {
                cpu_idle_force_poll--;
                WARN_ON_ONCE(cpu_idle_force_poll < 0);
        }
}

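/*
 * Illustrative usage sketch (not part of this file): a caller that needs the
 * idle loop to stay in polling mode for a while, e.g. around a latency
 * sensitive window, pairs the enable/disable calls so the reference count
 * above stays balanced. The function name below is hypothetical.
 */
static void example_latency_sensitive_window(void)
{
        cpu_idle_poll_ctrl(true);       /* idle CPUs now spin in cpu_idle_poll() */

        /* ... time critical work that cannot tolerate deep idle exit latency ... */

        cpu_idle_poll_ctrl(false);      /* drop the reference, normal idle resumes */
}
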
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
        cpu_idle_force_poll = 1;
        return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
        cpu_idle_force_poll = 0;
        return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

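/*
 * Note on the two __setup() hooks above: booting with "nohlt" on the kernel
 * command line forces the polling idle loop from the start, while "hlt"
 * restores the default behaviour of entering low-power idle states.
 */
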
static inline int cpu_idle_poll(void)
{
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        stop_critical_timings();
        while (!tif_need_resched() &&
               (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();
        start_critical_timings();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
}

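/*
 * In cpu_idle_poll() above, rcu_idle_enter()/rcu_idle_exit() tell RCU that
 * this CPU is in an extended quiescent state while it spins, so grace
 * periods do not wait on it, and stop_critical_timings() keeps the
 * irqsoff/preemptoff latency tracers from counting the busy-wait as latency.
 */
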
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
        cpu_idle_force_poll = 1;
        local_irq_enable();
}

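/*
 * Illustrative sketch (not part of this file): an architecture overrides the
 * weak arch_cpu_idle() above with a real low-power instruction; the weak
 * default simply falls back to polling because it has nothing better to do.
 * The body below is a simplified, hypothetical x86-flavoured example.
 */
void arch_cpu_idle(void)
{
        safe_halt();    /* enable interrupts and halt until the next wakeup */
}
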
/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void default_idle_call(void)
{
        if (current_clr_polling_and_test()) {
                local_irq_enable();
        } else {
                stop_critical_timings();
                arch_cpu_idle();
                start_critical_timings();
        }
}

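/*
 * In default_idle_call() above, current_clr_polling_and_test() drops the
 * polling bit and checks for a pending reschedule in one step: if a wakeup
 * already set TIF_NEED_RESCHED, halting would miss it, so interrupts are
 * simply re-enabled and the caller falls back into the idle loop instead.
 */
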
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                        int next_state)
{
        /*
         * If the idle task needs to be rescheduled, entering idle is
         * pointless: record a zero idle residency and return.
         */
        if (current_clr_polling_and_test()) {
                dev->last_residency = 0;
                local_irq_enable();
                return -EBUSY;
        }

        /*
         * Enter the idle state previously returned by the governor decision.
         * This function will block until an interrupt occurs and will take
         * care of re-enabling the local interrupts.
         */
        return cpuidle_enter(drv, dev, next_state);
}

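/*
 * call_cpuidle() above returns the index of the idle state that was actually
 * entered, or a negative error such as -EBUSY when entering idle was
 * pointless because a reschedule was already pending.
 */
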
/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set. If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;

        /*
         * Check if the idle task must be rescheduled. If it is the
         * case, exit the function after re-enabling the local irq.
         */
        if (need_resched()) {
                local_irq_enable();
                return;
        }

        /*
         * Tell the RCU framework we are entering an idle section,
         * so no more rcu read side critical sections and one more
         * step to the grace period.
         */
        rcu_idle_enter();

        if (cpuidle_not_available(drv, dev)) {
                default_idle_call();
                goto exit_idle;
        }

        /*
         * Suspend-to-idle ("freeze") is a system state in which all user space
         * has been frozen, all I/O devices have been suspended and the only
         * activity happens here and in interrupts (if any). In that case bypass
         * the cpuidle governor and go straight for the deepest idle state
         * available. Possibly also suspend the local tick and the entire
         * timekeeping to prevent timer interrupts from kicking us out of idle
         * until a proper wakeup interrupt happens.
         */
        if (idle_should_freeze()) {
                entered_state = cpuidle_enter_freeze(drv, dev);
                if (entered_state > 0) {
                        local_irq_enable();
                        goto exit_idle;
                }

                next_state = cpuidle_find_deepest_state(drv, dev);
                call_cpuidle(drv, dev, next_state);
        } else {
                /*
                 * Ask the cpuidle framework to choose a convenient idle state.
                 */
                next_state = cpuidle_select(drv, dev);
                entered_state = call_cpuidle(drv, dev, next_state);
                /*
                 * Give the governor an opportunity to reflect on the outcome.
                 */
                cpuidle_reflect(dev, entered_state);
        }

exit_idle:
        __current_set_polling();

        /*
         * It is up to the idle functions to re-enable local interrupts.
         */
        if (WARN_ON_ONCE(irqs_disabled()))
                local_irq_enable();

        rcu_idle_exit();
}

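/*
 * To summarize cpuidle_idle_call() above, there are three ways through it:
 * fall back to default_idle_call() when no cpuidle driver is available,
 * bypass the governor and pick the deepest state during suspend-to-idle,
 * or, in the common case, ask the governor via cpuidle_select() and report
 * the result back with cpuidle_reflect().
 */
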
DEFINE_PER_CPU(bool, cpu_dead_idle);

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
        while (1) {
                /*
                 * If the arch has a polling bit, we maintain an invariant:
                 *
                 * Our polling bit is clear if we're not scheduled (i.e. if
                 * rq->curr != rq->idle). This means that, if rq->idle has
                 * the polling bit set, then setting need_resched is
                 * guaranteed to cause the cpu to reschedule.
                 */

                __current_set_polling();
                quiet_vmstat();
                tick_nohz_idle_enter();

                while (!need_resched()) {
                        check_pgt_cache();
                        rmb();

                        if (cpu_is_offline(smp_processor_id())) {
                                rcu_cpu_notify(NULL, CPU_DYING_IDLE,
                                               (void *)(long)smp_processor_id());
                                smp_mb(); /* all activity before dead. */
                                this_cpu_write(cpu_dead_idle, true);
                                arch_cpu_idle_dead();
                        }

                        local_irq_disable();
                        arch_cpu_idle_enter();

                        /*
                         * In poll mode we reenable interrupts and spin.
                         *
                         * Also if we detected in the wakeup from idle
                         * path that the tick broadcast device expired
                         * for us, we don't want to go deep idle as we
                         * know that the IPI is going to arrive right
                         * away.
                         */
                        if (cpu_idle_force_poll || tick_check_broadcast_expired())
                                cpu_idle_poll();
                        else
                                cpuidle_idle_call();

                        arch_cpu_idle_exit();
                }

                /*
                 * Since we fell out of the loop above, we know
                 * TIF_NEED_RESCHED must be set, propagate it into
                 * PREEMPT_NEED_RESCHED.
                 *
                 * This is required because for polling idle loops we will
                 * not have had an IPI to fold the state for us.
                 */
                preempt_set_need_resched();
                tick_nohz_idle_exit();
                __current_clr_polling();

                /*
                 * We promise to call sched_ttwu_pending and reschedule
                 * if need_resched is set while polling is set. That
                 * means that clearing polling needs to be visible
                 * before doing these things.
                 */
                smp_mb__after_atomic();

                sched_ttwu_pending();
                schedule_preempt_disabled();
        }
}

void cpu_startup_entry(enum cpuhp_state state)
{
        /*
         * This #ifdef needs to die, but it's too late in the cycle to
         * make this generic (arm and sh have never invoked the canary
         * init for the non boot cpus!). Will be fixed in 3.11
         */
#ifdef CONFIG_X86
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. The boot CPU already has it initialized but no harm
         * in doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
#endif
        arch_cpu_idle_prepare();
        cpu_idle_loop();
}
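
/*
 * Illustrative sketch (not part of this file): cpu_startup_entry() never
 * returns, so an architecture's secondary-CPU bring-up path typically ends
 * by calling it once the CPU is fully initialized. The function below is a
 * simplified, hypothetical example of such a path.
 */
static void example_start_secondary(void)
{
        /* ... arch specific setup: per-cpu state, MMU, local timers ... */
        cpu_startup_entry(CPUHP_ONLINE);        /* enter the idle loop, never returns */
}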