/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

static int __read_mostly cpu_idle_force_poll;

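/*
 * cpu_idle_poll_ctrl - force or un-force the polling idle loop.
 *
 * Calls nest: each enable increments the count and each disable
 * decrements it, so polling stays forced until every enabler has
 * backed out. The WARN_ON_ONCE() catches unbalanced disables.
 */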
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

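/*
 * "nohlt" forces the polling idle loop from boot; "hlt" restores the
 * default arch-specific idle routine.
 */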
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

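/*
 * Spin with interrupts enabled until a reschedule is pending. RCU is
 * told the CPU is idle for the duration so grace periods are not held
 * up, and the trace events make the poll visible to power tooling
 * like any other idle state.
 */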
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
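/*
 * Default arch_cpu_idle(): with no arch-specific way to halt the CPU,
 * fall back to polling permanently and re-enable interrupts, as
 * callers expect idle routines to do.
 */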
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * Returns 0 in all cases; when the cpuidle framework cannot be used,
 * the default arch-specific idle routine is run as a fallback.
 */
static int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state, ret;
	bool broadcast;

	/*
	 * Check if the idle task must be rescheduled. If so, exit the
	 * function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	/*
	 * During the idle period, stop measuring the latencies of
	 * irqs-disabled critical sections.
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section: no
	 * more RCU read-side critical sections will run, which lets
	 * grace periods progress without this CPU.
	 */
	rcu_idle_enter();

	/*
	 * Check if the cpuidle framework is ready, otherwise fall back
	 * to the default arch-specific idle method.
	 */
	ret = cpuidle_enabled(drv, dev);

	if (!ret) {
		/*
		 * Ask the governor to choose an idle state it thinks
		 * is convenient to go to. There is *always* a
		 * convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);

		/*
		 * If the idle task must be rescheduled, going idle is
		 * pointless: record a zero idle residency and get out
		 * of this function.
		 */
		if (current_clr_polling_and_test()) {
			dev->last_residency = 0;
			entered_state = next_state;
			local_irq_enable();
		} else {
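			/*
			 * States flagged CPUIDLE_FLAG_TIMER_STOP shut
			 * down the local timer in deep idle, so the
			 * broadcast framework has to take over wakeups.
			 */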
			broadcast = !!(drv->states[next_state].flags &
				       CPUIDLE_FLAG_TIMER_STOP);

			if (broadcast) {
				/*
				 * Tell the time framework to switch
				 * to a broadcast timer because our
				 * local timer will be shut down. If a
				 * local timer is used from another
				 * cpu as a broadcast timer, this call
				 * may fail if it is not available.
				 */
				ret = clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					&dev->cpu);
			}

			if (!ret) {
				trace_cpu_idle_rcuidle(next_state, dev->cpu);

				/*
				 * Enter the idle state previously
				 * returned by the governor. This
				 * function will block until an
				 * interrupt occurs and will take care
				 * of re-enabling the local interrupts.
				 */
				entered_state = cpuidle_enter(drv, dev,
							      next_state);

				trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
						       dev->cpu);

				if (broadcast)
					clockevents_notify(
						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
						&dev->cpu);

				/*
				 * Give the governor an opportunity to
				 * reflect on the outcome.
				 */
				cpuidle_reflect(dev, entered_state);
			}
		}
	}

	/*
	 * We can't use the cpuidle framework, so fall back to the
	 * default idle routine.
	 */
	if (ret) {
		if (!current_clr_polling_and_test())
			arch_cpu_idle();
		else
			local_irq_enable();
	}

	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable the local
	 * interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();

	return 0;
}

/*
 * Generic idle loop implementation.
 *
 * Each pass of the outer loop covers one idle period: the periodic
 * tick is stopped on entry, the inner loop idles and wakes until a
 * reschedule is needed, then the tick is restarted and we schedule.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

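			/*
			 * If this CPU has gone offline, let the arch
			 * take it down; arch_cpu_idle_dead() is
			 * normally expected not to return (the weak
			 * default above is a no-op, though).
			 */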
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we re-enable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
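		/*
		 * schedule_preempt_disabled() enters the scheduler
		 * with preemption still disabled; once the idle task
		 * is picked again we return here and restart the loop.
		 */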
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

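/*
 * cpu_startup_entry - arch-independent entry to the idle loop, called
 * once a CPU (boot or secondary) has finished bringing itself up.
 * It never returns.
 */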
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	__current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}