/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tick_broadcast_on;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_forced;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

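/*
 * Illustrative sketch, not part of this file: a timer whose counter
 * keeps running in deep C-states can offer itself as broadcast
 * device simply by registering without the DUMMY/PERCPU/C3STOP
 * feature bits and with a rating that beats the incumbent. All
 * names below are made up; clockevents_config_and_register() is the
 * real registration helper, and the state callbacks a full driver
 * must supply are elided.
 */
#if 0
static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	/* Program the hardware comparator 'delta' ticks ahead. */
	return 0;
}

static struct clock_event_device example_broadcast_timer = {
	.name		= "example-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,	/* must beat the incumbent's rating */
	.set_next_event	= example_set_next_event,
};

static void __init example_timer_init(void)
{
	/* 1 MHz input clock; min 0xf, max 0x7fffffff ticks per event */
	clockevents_config_and_register(&example_broadcast_timer,
					1000000, 0xf, 0x7fffffff);
}
#endif
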
/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}


static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

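/*
 * Illustrative sketch, not part of this file: the tick_broadcast()
 * hook installed above is provided by the architecture. On ARM it
 * boils down to sending the timer IPI to the target CPUs, roughly:
 *
 *	void tick_broadcast(const struct cpumask *mask)
 *	{
 *		smp_cross_call(mask, IPI_TIMER);
 *	}
 *
 * smp_cross_call()/IPI_TIMER are ARM internals, quoted from memory.
 */
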
/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device.
			 */
			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			/* Nothing to do */
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

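/*
 * Illustrative sketch, not part of this file: architectures that
 * select CONFIG_GENERIC_CLOCKEVENTS_BROADCAST invoke the function
 * above from their timer-broadcast IPI handler, in the style of
 * ARM's handle_IPI():
 *
 *	case IPI_TIMER:
 *		irq_enter();
 *		tick_receive_broadcast();
 *		irq_exit();
 *		break;
 *
 * The IPI plumbing is arch-specific and quoted here from memory.
 */
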
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		local = true;
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;

	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	if (!tick_device_is_functional(dev))
		return;

	raw_spin_lock(&tick_broadcast_lock);
	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		/* fall through - FORCE is ON plus the sticky flag */
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);

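/*
 * Illustrative sketch, not part of this file: <linux/tick.h> wraps
 * the control function above as
 *
 *	static inline void tick_broadcast_enable(void)
 *	{
 *		tick_broadcast_control(TICK_BROADCAST_ON);
 *	}
 *
 * (likewise _disable()/_force()), and e.g. a cpuidle driver arms
 * broadcast mode on every CPU whose local timer stops in deep idle.
 * The helper names below are made up:
 */
#if 0
static void example_setup_broadcast(void *arg)
{
	tick_broadcast_enable();
}

static void example_arm_broadcast_timer(void)
{
	/* Run the callback on each online CPU, waiting for completion. */
	on_each_cpu(example_setup_broadcast, NULL, 1);
}
#endif
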
/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

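/*
 * Illustrative sketch, not part of this file: the generic idle loop
 * in kernel/sched/idle.c consumes this check and polls instead of
 * entering a deep C-state when the wakeup IPI is imminent, roughly:
 *
 *	if (cpu_idle_force_poll || tick_check_broadcast_expired())
 *		cpu_idle_poll();
 *	else
 *		cpuidle_idle_call();
 *
 * Quoted from memory of that era's idle loop.
 */
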
/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
					      CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}

static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event.tv64 == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

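/*
 * Illustrative sketch, not part of this file: CLOCK_EVT_FEAT_HRTIMER
 * marks the pseudo broadcast device that
 * kernel/time/tick-broadcast-hrtimer.c implements on top of a
 * hrtimer running on one CPU; bound_on records that CPU so
 * broadcast_needs_cpu() can veto its deep-idle entry. Sketch of the
 * relevant bits, quoted from memory with the remaining fields elided:
 */
#if 0
static struct clock_event_device ce_broadcast_hrtimer = {
	.features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_KTIME |
			  CLOCK_EVT_FEAT_HRTIMER,
	.rating		= 0,	/* only used when nothing better exists */
	/* .set_next_ktime() etc. elided */
};
#endif
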
static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event.tv64 < bc->next_event.tv64)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state:	The target state (enter/exit)
 *
 * The system enters/leaves a state where affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return 0;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			broadcast_shutdown_local(bc, dev);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event);
		}
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we remove the
		 * CPU from the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
					       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

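/*
 * Illustrative sketch, not part of this file: <linux/tick.h> exposes
 * the enter/exit pair as
 *
 *	static inline int tick_broadcast_enter(void)
 *	{
 *		return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
 *	}
 *
 *	static inline void tick_broadcast_exit(void)
 *	{
 *		tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
 *	}
 *
 * and an idle-entry path must honor the -EBUSY result, which says
 * this CPU currently owns the hrtimer based broadcast and may not go
 * deep idle. The state-entry helpers below are made-up names:
 */
#if 0
	if (tick_broadcast_enter()) {
		example_enter_shallow_idle_state();	/* got -EBUSY */
	} else {
		example_enter_deep_idle_state();
		tick_broadcast_exit();
	}
#endif
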
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

/*
 * Check whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
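
/*
 * Illustrative sketch, not part of this file: tick_broadcast_init()
 * runs early via tick_init() in kernel/time/tick-common.c, which
 * start_kernel() calls before any clockevent device registers, so
 * the masks above exist before anyone sets bits in them. Quoted
 * from memory:
 *
 *	void __init tick_init(void)
 *	{
 *		tick_broadcast_init();
 *		tick_nohz_init();
 *	}
 */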