/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <ccross@android.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a
 * workaround). Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC). Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call. At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle. A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time. During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus. If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for. It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency). The requested_state
 * variable is not locked. It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read after all the cpus are ready for the coupled idle
 * state and are no longer updating it.
 *
 * Three atomic counters are used. alive_count tracks the number
 * of cpus in the coupled set that are currently or soon will be
 * online. waiting_count tracks the number of cpus that are in
 * the waiting loop, in the ready loop, or in the coupled idle state.
 * ready_count tracks the number of cpus that are in the ready loop
 * or in the coupled idle state.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 * Set struct cpuidle_device.coupled_cpus to the mask of all
 * coupled cpus, usually the same as cpu_possible_mask if all cpus
 * are part of the same cluster. The coupled_cpus mask must be
 * set in the struct cpuidle_device for each cpu.
 *
 * Set struct cpuidle_driver.safe_state_index to the index of a state
 * that is not a coupled state. This is usually WFI.
 *
 * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 * state that affects multiple cpus.
 *
 * Provide a struct cpuidle_state.enter function for each state
 * that affects multiple cpus. This function is guaranteed to be
 * called on all cpus at approximately the same time. The driver
 * should ensure that the cpus all abort together if any cpu tries
 * to abort once the function is called. The function should return
 * with interrupts still disabled.
 */
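
/*
 * Editorial sketch (not part of the original file): one way a platform
 * driver might wire up the pieces described above. All example_* names,
 * the state names and the power-down steps are hypothetical; a real
 * driver fills in its platform's latencies and sequencing. It assumes
 * the cpuidle_register() helper, which copies the coupled mask into
 * every cpu's cpuidle_device.
 */
#if 0
static int example_enter_wfi(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();		/* arch WFI helper; name varies by arch */
	return index;
}

static int example_enter_coupled(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	/* runs on all coupled cpus at approximately the same time */
	/* ... platform specific shared power down sequence here ... */
	return index;		/* return with interrupts still disabled */
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.states = {
		[0] = {				/* the safe, non-coupled state */
			.enter = example_enter_wfi,
			.name = "WFI",
		},
		[1] = {				/* affects shared blocks */
			.enter = example_enter_coupled,
			.name = "C2",
			.flags = CPUIDLE_FLAG_COUPLED,
		},
	},
	.state_count = 2,
	.safe_state_index = 0,
};

static int __init example_cpuidle_init(void)
{
	/* registers one cpuidle_device per cpu, all sharing the mask */
	return cpuidle_register(&example_idle_driver, cpu_possible_mask);
}
#endif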

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: synchronizes the cpus when aborting the ready loop to retry
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)

#define CPUIDLE_COUPLED_NOT_IDLE	(-1)

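/*
 * Worked example (editorial note, not in the original source): the ready
 * and waiting counts share a single atomic_t, with the waiting count in
 * the low WAITING_BITS and the ready count in the bits above them. The
 * helpers below decode it as:
 *
 *	int v = atomic_read(&coupled->ready_waiting_counts);
 *	int waiting = v & WAITING_MASK;		(cpus in the waiting loop)
 *	int ready = v >> WAITING_BITS;		(cpus in the ready loop)
 *
 * so, for example, two ready cpus plus two waiting cpus are stored as
 * (2 << WAITING_BITS) | 2 == 0x00020002.
 */
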
static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * smp_call_function_single_async with the per cpu call_single_data struct
 * already in use. This prevents a deadlock where two cpus are waiting for
 * each other's call_single_data struct to be available.
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a: atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function. Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, and will be reset to 0 before any cpu returns from it.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}
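
/*
 * Usage sketch (editorial, not part of the original file): a driver's
 * coupled state.enter handler can use the barrier to rendezvous around a
 * step that only one cpu may perform. example_sync and
 * example_enter_coupled_sync are hypothetical names.
 */
#if 0
static atomic_t example_sync = ATOMIC_INIT(0);	/* starts at 0, as required */

static int example_enter_coupled_sync(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
{
	/* all coupled cpus arrive here at approximately the same time */
	if (dev->cpu == 0) {
		/* e.g. perform the last-cpu power sequencing step */
	}

	/* no cpu continues until every online coupled cpu has arrived */
	cpuidle_coupled_parallel_barrier(dev, &example_sync);

	/* ... enter the coupled low power state; resume here on wake ... */
	return index;
}
#endif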

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
 * @drv: struct cpuidle_driver for the platform
 *
 * Returns 0 for valid state values, a negative error code otherwise:
 *  * -EINVAL if safe_state_index is not set correctly for a coupled state.
 */
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
{
	int i;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (cpuidle_state_is_coupled(drv, i) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -EINVAL;
	}

	return 0;
}

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus. Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static
inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
				-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count. Matches the write barrier in
	 * cpuidle_coupled_set_waiting.
	 */
	smp_rmb();

	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}

static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;
	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poke_pending is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK. Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops. Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}
422 | /** | |
423 | * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed | |
424 | * @cpu - this cpu | |
425 | * | |
426 | * Turns on interrupts and spins until any outstanding poke interrupts have | |
427 | * been processed and the poke bit has been cleared. | |
428 | * | |
429 | * Other interrupts may also be processed while interrupts are enabled, so | |
9e19b73c | 430 | * need_resched() must be tested after this function returns to make sure |
4126c019 CC |
431 | * the interrupt didn't schedule work that should take the cpu out of idle. |
432 | * | |
9e19b73c | 433 | * Returns 0 if no poke was pending, 1 if a poke was cleared. |
4126c019 CC |
434 | */ |
435 | static int cpuidle_coupled_clear_pokes(int cpu) | |
436 | { | |
9e19b73c CC |
437 | if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending)) |
438 | return 0; | |
439 | ||
4126c019 | 440 | local_irq_enable(); |
f983827b | 441 | while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending)) |
4126c019 CC |
442 | cpu_relax(); |
443 | local_irq_disable(); | |
444 | ||
9e19b73c | 445 | return 1; |
4126c019 CC |
446 | } |

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state. This is a two
 * stage process. In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the cpuidle_device's safe state), and wait for
 * all the other cpus to call this function. Once all coupled cpus are idle,
 * the second stage will start. Each coupled cpu will spin until all cpus have
 * guaranteed that they will call the enter function for the target state.
 *
 * This function must be called with interrupts disabled. It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
						    drv->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state. This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu. If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
	       !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
						    drv->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle. There is a small chance that
	 * one of the other cpus just became active. Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter. Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */

	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting. The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop. The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state. It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count. All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt. If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle. There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}

static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	coupled->online_count = cpumask_weight(&cpus);
}

/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init. Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	struct call_single_data *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu(cpu, &dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	csd->func = cpuidle_coupled_handle_poke;
	csd->info = (void *)(unsigned long)dev->cpu;

	return 0;
}

/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle. Removes the
 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
 * this was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	/* free the struct only when the last coupled cpu unregisters */
	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}

/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* Force all cpus out of the waiting loop. */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus. Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

/**
 * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
 * @nb: notifier block
 * @action: hotplug transition
 * @hcpu: target cpu number
 *
 * Called when a cpu is brought online or offline using hotplug. Updates the
 * coupled cpu set appropriately.
 */
static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
		unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
	case CPU_ONLINE:
	case CPU_DEAD:
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		break;
	default:
		return NOTIFY_OK;
	}

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (!dev || !dev->coupled)
		goto out;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
		cpuidle_coupled_prevent_idle(dev->coupled);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		cpuidle_coupled_update_online_cpus(dev->coupled);
		/* Fall through */
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		cpuidle_coupled_allow_idle(dev->coupled);
		break;
	}

out:
	mutex_unlock(&cpuidle_lock);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_coupled_cpu_notifier = {
	.notifier_call = cpuidle_coupled_cpu_notify,
};

static int __init cpuidle_coupled_init(void)
{
	return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
}
core_initcall(cpuidle_coupled_init);