/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

static int __read_mostly cpu_idle_force_poll;

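/*
 * cpu_idle_poll_ctrl - enable/disable forced polling in the idle loop.
 * Enable calls nest, so each enable must be balanced by a disable.
 */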
void cpu_idle_poll_ctrl(bool enable)
{
        if (enable) {
                cpu_idle_force_poll++;
        } else {
                cpu_idle_force_poll--;
                WARN_ON_ONCE(cpu_idle_force_poll < 0);
        }
}

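/*
 * Early command line parameters: "nohlt" forces the idle loop to poll,
 * "hlt" restores the default (non-polling) behaviour.
 */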
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
        cpu_idle_force_poll = 1;
        return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
        cpu_idle_force_poll = 0;
        return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

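/*
 * cpu_idle_poll - spin with interrupts enabled until a reschedule is
 * needed, with RCU told that this CPU is idle.
 */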
static inline int cpu_idle_poll(void)
{
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!tif_need_resched())
                cpu_relax();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
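/*
 * Default arch_cpu_idle(): the architecture provides no idle routine,
 * so force the idle loop to poll from now on.
 */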
void __weak arch_cpu_idle(void)
{
        cpu_idle_force_poll = 1;
        local_irq_enable();
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
        unsigned int broadcast;

        /*
         * Check if the idle task must be rescheduled. If it is the
         * case, exit the function after re-enabling the local irq.
         */
        if (need_resched()) {
                local_irq_enable();
                return;
        }

        /*
         * During the idle period, stop measuring the disabled irqs
         * critical sections latencies.
         */
        stop_critical_timings();

        /*
         * Tell the RCU framework we are entering an idle section,
         * so no more rcu read side critical sections and one more
         * step to the grace period.
         */
        rcu_idle_enter();

        /*
         * Ask the cpuidle framework to choose a convenient idle state.
         * Fall back to the default arch idle method on errors.
         */
        next_state = cpuidle_select(drv, dev);
        if (next_state < 0) {
use_default:
                /*
                 * We can't use the cpuidle framework, let's use the
                 * default idle routine.
                 */
                if (current_clr_polling_and_test())
                        local_irq_enable();
                else
                        arch_cpu_idle();

                goto exit_idle;
        }

        /*
         * The idle task must be scheduled, so it is pointless to enter
         * an idle state; record a zero idle residency and get out of
         * this function.
         */
        if (current_clr_polling_and_test()) {
                dev->last_residency = 0;
                entered_state = next_state;
                local_irq_enable();
                goto exit_idle;
        }

        broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;

        /*
         * Tell the time framework to switch to a broadcast timer
         * because our local timer will be shut down. If a local timer
         * is used from another cpu as a broadcast timer, this call may
         * fail if it is not available.
         */
        if (broadcast &&
            clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
                goto use_default;

        /* Take note of the planned idle state. */
        idle_set_state(this_rq(), &drv->states[next_state]);

        /*
         * Enter the idle state previously returned by the governor decision.
         * This function will block until an interrupt occurs and will take
         * care of re-enabling the local interrupts.
         */
        entered_state = cpuidle_enter(drv, dev, next_state);

        /* The cpu is no longer idle or about to enter idle. */
        idle_set_state(this_rq(), NULL);

        if (broadcast)
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

        /*
         * Give the governor an opportunity to reflect on the outcome.
         */
        cpuidle_reflect(dev, entered_state);

exit_idle:
        __current_set_polling();

        /*
         * It is up to the idle functions to re-enable local interrupts.
         */
        if (WARN_ON_ONCE(irqs_disabled()))
                local_irq_enable();

        rcu_idle_exit();
        start_critical_timings();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
        while (1) {
                /*
                 * If the arch has a polling bit, we maintain an invariant:
                 *
                 * Our polling bit is clear if we're not scheduled (i.e. if
                 * rq->curr != rq->idle). This means that, if rq->idle has
                 * the polling bit set, then setting need_resched is
                 * guaranteed to cause the cpu to reschedule.
                 */

                __current_set_polling();
                tick_nohz_idle_enter();

                while (!need_resched()) {
                        check_pgt_cache();
                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                arch_cpu_idle_dead();

                        local_irq_disable();
                        arch_cpu_idle_enter();

                        /*
                         * In poll mode we reenable interrupts and spin.
                         *
                         * Also if we detected in the wakeup from idle
                         * path that the tick broadcast device expired
                         * for us, we don't want to go deep idle as we
                         * know that the IPI is going to arrive right
                         * away.
                         */
                        if (cpu_idle_force_poll || tick_check_broadcast_expired())
                                cpu_idle_poll();
                        else
                                cpuidle_idle_call();

                        arch_cpu_idle_exit();
                }

                /*
                 * Since we fell out of the loop above, we know
                 * TIF_NEED_RESCHED must be set, propagate it into
                 * PREEMPT_NEED_RESCHED.
                 *
                 * This is required because for polling idle loops we will
                 * not have had an IPI to fold the state for us.
                 */
                preempt_set_need_resched();
                tick_nohz_idle_exit();
                __current_clr_polling();

                /*
                 * We promise to call sched_ttwu_pending and reschedule
                 * if need_resched is set while polling is set. That
                 * means that clearing polling needs to be visible
                 * before doing these things.
                 */
                smp_mb__after_atomic();

                sched_ttwu_pending();
                schedule_preempt_disabled();
        }
}

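/*
 * cpu_startup_entry - common entry point for a CPU's idle thread; sets up
 * the architecture and enters the idle loop, never returning.
 */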
void cpu_startup_entry(enum cpuhp_state state)
{
        /*
         * This #ifdef needs to die, but it's too late in the cycle to
         * make this generic (arm and sh have never invoked the canary
         * init for the non-boot CPUs!). Will be fixed in 3.11
         */
#ifdef CONFIG_X86
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. The boot CPU already has it initialized but no harm
         * in doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
#endif
        arch_cpu_idle_prepare();
        cpu_idle_loop();
}