clockevents: Add generic timer broadcast function
[deliverable/linux.git] / kernel / time / tick-broadcast.c
CommitLineData
f8381cba
TG
1/*
2 * linux/kernel/time/tick-broadcast.c
3 *
4 * This file contains functions which emulate a local clock-event
5 * device via a broadcast event source.
6 *
7 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
8 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
9 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
10 *
11 * This code is licenced under the GPL version 2. For details see
12 * kernel-base/COPYING.
13 */
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
d7b90689 17#include <linux/interrupt.h>
f8381cba
TG
18#include <linux/percpu.h>
19#include <linux/profile.h>
20#include <linux/sched.h>
12ad1000 21#include <linux/smp.h>
f8381cba
TG
22
23#include "tick-internal.h"
24
25/*
26 * Broadcast support for broken x86 hardware, where the local apic
27 * timer stops in C3 state.
28 */
29
a52f5c56 30static struct tick_device tick_broadcast_device;
6b954823
RR
31/* FIXME: Use cpumask_var_t. */
32static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
33static DECLARE_BITMAP(tmpmask, NR_CPUS);
b5f91da0 34static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
aa276e1c 35static int tick_broadcast_force;
f8381cba 36
5590a536
TG
37#ifdef CONFIG_TICK_ONESHOT
38static void tick_broadcast_clear_oneshot(int cpu);
39#else
40static inline void tick_broadcast_clear_oneshot(int cpu) { }
41#endif
42
289f480a
IM
43/*
44 * Debugging: see timer_list.c
45 */
46struct tick_device *tick_get_broadcast_device(void)
47{
48 return &tick_broadcast_device;
49}
50
6b954823 51struct cpumask *tick_get_broadcast_mask(void)
289f480a 52{
6b954823 53 return to_cpumask(tick_broadcast_mask);
289f480a
IM
54}
55
f8381cba
TG
/*
 * Start the broadcast device in periodic mode, if one is registered.
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}
64
65/*
66 * Check, if the device can be utilized as broadcast device:
67 */
68int tick_check_broadcast_device(struct clock_event_device *dev)
69{
4a93232d
VP
70 if ((tick_broadcast_device.evtdev &&
71 tick_broadcast_device.evtdev->rating >= dev->rating) ||
72 (dev->features & CLOCK_EVT_FEAT_C3STOP))
f8381cba
TG
73 return 0;
74
c1be8430 75 clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
f8381cba 76 tick_broadcast_device.evtdev = dev;
6b954823 77 if (!cpumask_empty(tick_get_broadcast_mask()))
f8381cba
TG
78 tick_broadcast_start_periodic(dev);
79 return 1;
80}
81
82/*
83 * Check, if the device is the broadcast device
84 */
85int tick_is_broadcast_device(struct clock_event_device *dev)
86{
87 return (dev && tick_broadcast_device.evtdev == dev);
88}
89
12ad1000
MR
/* Last-resort broadcast hook: complain (once) instead of crashing. */
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
94
f8381cba
TG
95/*
96 * Check, if the device is disfunctional and a place holder, which
97 * needs to be handled by the broadcast device.
98 */
99int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
100{
101 unsigned long flags;
102 int ret = 0;
103
b5f91da0 104 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
f8381cba
TG
105
106 /*
107 * Devices might be registered with both periodic and oneshot
108 * mode disabled. This signals, that the device needs to be
109 * operated from the broadcast device and is a placeholder for
110 * the cpu local device.
111 */
112 if (!tick_device_is_functional(dev)) {
113 dev->event_handler = tick_handle_periodic;
12ad1000
MR
114 if (!dev->broadcast)
115 dev->broadcast = tick_broadcast;
116 if (!dev->broadcast) {
117 pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
118 dev->name);
119 dev->broadcast = err_broadcast;
120 }
6b954823 121 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
f8381cba
TG
122 tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
123 ret = 1;
5590a536
TG
124 } else {
125 /*
126 * When the new device is not affected by the stop
127 * feature and the cpu is marked in the broadcast mask
128 * then clear the broadcast bit.
129 */
130 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
131 int cpu = smp_processor_id();
f8381cba 132
6b954823 133 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
5590a536
TG
134 tick_broadcast_clear_oneshot(cpu);
135 }
136 }
b5f91da0 137 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
f8381cba
TG
138 return ret;
139}
140
12572dbb
MR
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/*
 * Called on the target cpu when a broadcast tick (e.g. an IPI) arrives:
 * run the local device's event handler as if the local timer had fired.
 *
 * Returns 0 on success, -ENODEV/-EINVAL when no usable local device or
 * handler is set up yet.
 */
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif
157
f8381cba 158/*
6b954823 159 * Broadcast the event to the cpus, which are set in the mask (mangled).
f8381cba 160 */
6b954823 161static void tick_do_broadcast(struct cpumask *mask)
f8381cba 162{
186e3cb8 163 int cpu = smp_processor_id();
f8381cba
TG
164 struct tick_device *td;
165
166 /*
167 * Check, if the current cpu is in the mask
168 */
6b954823
RR
169 if (cpumask_test_cpu(cpu, mask)) {
170 cpumask_clear_cpu(cpu, mask);
f8381cba
TG
171 td = &per_cpu(tick_cpu_device, cpu);
172 td->evtdev->event_handler(td->evtdev);
f8381cba
TG
173 }
174
6b954823 175 if (!cpumask_empty(mask)) {
f8381cba
TG
176 /*
177 * It might be necessary to actually check whether the devices
178 * have different broadcast functions. For now, just use the
179 * one of the first device. This works as long as we have this
180 * misfeature only on x86 (lapic)
181 */
6b954823
RR
182 td = &per_cpu(tick_cpu_device, cpumask_first(mask));
183 td->evtdev->broadcast(mask);
f8381cba 184 }
f8381cba
TG
185}
186
187/*
188 * Periodic broadcast:
189 * - invoke the broadcast handlers
190 */
191static void tick_do_periodic_broadcast(void)
192{
b5f91da0 193 raw_spin_lock(&tick_broadcast_lock);
f8381cba 194
6b954823
RR
195 cpumask_and(to_cpumask(tmpmask),
196 cpu_online_mask, tick_get_broadcast_mask());
197 tick_do_broadcast(to_cpumask(tmpmask));
f8381cba 198
b5f91da0 199 raw_spin_unlock(&tick_broadcast_lock);
f8381cba
TG
200}
201
202/*
203 * Event handler for periodic broadcast ticks
204 */
205static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
206{
d4496b39
TG
207 ktime_t next;
208
f8381cba
TG
209 tick_do_periodic_broadcast();
210
211 /*
212 * The device is in periodic mode. No reprogramming necessary:
213 */
214 if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
215 return;
216
217 /*
218 * Setup the next period for devices, which do not have
d4496b39 219 * periodic mode. We read dev->next_event first and add to it
698f9315 220 * when the event already expired. clockevents_program_event()
d4496b39
TG
221 * sets dev->next_event only when the event is really
222 * programmed to the device.
f8381cba 223 */
d4496b39
TG
224 for (next = dev->next_event; ;) {
225 next = ktime_add(next, tick_period);
f8381cba 226
d1748302 227 if (!clockevents_program_event(dev, next, false))
f8381cba
TG
228 return;
229 tick_do_periodic_broadcast();
230 }
231}
232
233/*
234 * Powerstate information: The system enters/leaves a state, where
235 * affected devices might stop
236 */
f833bab8 237static void tick_do_broadcast_on_off(unsigned long *reason)
f8381cba
TG
238{
239 struct clock_event_device *bc, *dev;
240 struct tick_device *td;
f833bab8 241 unsigned long flags;
9c17bcda 242 int cpu, bc_stopped;
f8381cba 243
b5f91da0 244 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
f8381cba
TG
245
246 cpu = smp_processor_id();
247 td = &per_cpu(tick_cpu_device, cpu);
248 dev = td->evtdev;
249 bc = tick_broadcast_device.evtdev;
250
251 /*
1595f452 252 * Is the device not affected by the powerstate ?
f8381cba 253 */
1595f452 254 if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
f8381cba
TG
255 goto out;
256
3dfbc884
TG
257 if (!tick_device_is_functional(dev))
258 goto out;
1595f452 259
6b954823 260 bc_stopped = cpumask_empty(tick_get_broadcast_mask());
9c17bcda 261
1595f452
TG
262 switch (*reason) {
263 case CLOCK_EVT_NOTIFY_BROADCAST_ON:
264 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
6b954823
RR
265 if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
266 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
07454bff
TG
267 if (tick_broadcast_device.mode ==
268 TICKDEV_MODE_PERIODIC)
2344abbc 269 clockevents_shutdown(dev);
f8381cba 270 }
3dfbc884 271 if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
aa276e1c 272 tick_broadcast_force = 1;
1595f452
TG
273 break;
274 case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
aa276e1c 275 if (!tick_broadcast_force &&
6b954823
RR
276 cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
277 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
07454bff
TG
278 if (tick_broadcast_device.mode ==
279 TICKDEV_MODE_PERIODIC)
f8381cba
TG
280 tick_setup_periodic(dev, 0);
281 }
1595f452 282 break;
f8381cba
TG
283 }
284
6b954823 285 if (cpumask_empty(tick_get_broadcast_mask())) {
9c17bcda 286 if (!bc_stopped)
2344abbc 287 clockevents_shutdown(bc);
9c17bcda 288 } else if (bc_stopped) {
f8381cba
TG
289 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
290 tick_broadcast_start_periodic(bc);
79bf2bb3
TG
291 else
292 tick_broadcast_setup_oneshot(bc);
f8381cba
TG
293 }
294out:
b5f91da0 295 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
f8381cba
TG
296}
297
298/*
299 * Powerstate information: The system enters/leaves a state, where
300 * affected devices might stop.
301 */
302void tick_broadcast_on_off(unsigned long reason, int *oncpu)
303{
6b954823 304 if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
833df317 305 printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
72fcde96 306 "offline CPU #%d\n", *oncpu);
bf020cb7 307 else
f833bab8 308 tick_do_broadcast_on_off(&reason);
f8381cba
TG
309}
310
311/*
312 * Set the periodic handler depending on broadcast on/off
313 */
314void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
315{
316 if (!broadcast)
317 dev->event_handler = tick_handle_periodic;
318 else
319 dev->event_handler = tick_handle_periodic_broadcast;
320}
321
322/*
323 * Remove a CPU from broadcasting
324 */
325void tick_shutdown_broadcast(unsigned int *cpup)
326{
327 struct clock_event_device *bc;
328 unsigned long flags;
329 unsigned int cpu = *cpup;
330
b5f91da0 331 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
f8381cba
TG
332
333 bc = tick_broadcast_device.evtdev;
6b954823 334 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
f8381cba
TG
335
336 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
6b954823 337 if (bc && cpumask_empty(tick_get_broadcast_mask()))
2344abbc 338 clockevents_shutdown(bc);
f8381cba
TG
339 }
340
b5f91da0 341 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
f8381cba 342}
79bf2bb3 343
6321dd60
TG
344void tick_suspend_broadcast(void)
345{
346 struct clock_event_device *bc;
347 unsigned long flags;
348
b5f91da0 349 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
6321dd60
TG
350
351 bc = tick_broadcast_device.evtdev;
18de5bc4 352 if (bc)
2344abbc 353 clockevents_shutdown(bc);
6321dd60 354
b5f91da0 355 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
6321dd60
TG
356}
357
358int tick_resume_broadcast(void)
359{
360 struct clock_event_device *bc;
361 unsigned long flags;
362 int broadcast = 0;
363
b5f91da0 364 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
6321dd60
TG
365
366 bc = tick_broadcast_device.evtdev;
6321dd60 367
cd05a1f8 368 if (bc) {
18de5bc4
TG
369 clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);
370
cd05a1f8
TG
371 switch (tick_broadcast_device.mode) {
372 case TICKDEV_MODE_PERIODIC:
6b954823 373 if (!cpumask_empty(tick_get_broadcast_mask()))
cd05a1f8 374 tick_broadcast_start_periodic(bc);
6b954823
RR
375 broadcast = cpumask_test_cpu(smp_processor_id(),
376 tick_get_broadcast_mask());
cd05a1f8
TG
377 break;
378 case TICKDEV_MODE_ONESHOT:
a6371f80
SS
379 if (!cpumask_empty(tick_get_broadcast_mask()))
380 broadcast = tick_resume_broadcast_oneshot(bc);
cd05a1f8
TG
381 break;
382 }
6321dd60 383 }
b5f91da0 384 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
6321dd60
TG
385
386 return broadcast;
387}
388
389
79bf2bb3
TG
390#ifdef CONFIG_TICK_ONESHOT
391
6b954823
RR
392/* FIXME: use cpumask_var_t. */
393static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);
79bf2bb3 394
289f480a 395/*
6b954823 396 * Exposed for debugging: see timer_list.c
289f480a 397 */
6b954823 398struct cpumask *tick_get_broadcast_oneshot_mask(void)
289f480a 399{
6b954823 400 return to_cpumask(tick_broadcast_oneshot_mask);
289f480a
IM
401}
402
79bf2bb3
TG
403static int tick_broadcast_set_event(ktime_t expires, int force)
404{
405 struct clock_event_device *bc = tick_broadcast_device.evtdev;
1fb9b7d2 406
b9a6a235
TG
407 if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
408 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
409
d1748302 410 return clockevents_program_event(bc, expires, force);
79bf2bb3
TG
411}
412
cd05a1f8
TG
413int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
414{
415 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
b7e113dc 416 return 0;
cd05a1f8
TG
417}
418
fb02fbc1
TG
419/*
420 * Called from irq_enter() when idle was interrupted to reenable the
421 * per cpu device.
422 */
423void tick_check_oneshot_broadcast(int cpu)
424{
6b954823 425 if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
fb02fbc1
TG
426 struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
427
428 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
429 }
430}
431
79bf2bb3
TG
432/*
433 * Handle oneshot mode broadcasting
434 */
435static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
436{
437 struct tick_device *td;
cdc6f27d 438 ktime_t now, next_event;
79bf2bb3
TG
439 int cpu;
440
b5f91da0 441 raw_spin_lock(&tick_broadcast_lock);
79bf2bb3
TG
442again:
443 dev->next_event.tv64 = KTIME_MAX;
cdc6f27d 444 next_event.tv64 = KTIME_MAX;
6b954823 445 cpumask_clear(to_cpumask(tmpmask));
79bf2bb3
TG
446 now = ktime_get();
447 /* Find all expired events */
6b954823 448 for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
79bf2bb3
TG
449 td = &per_cpu(tick_cpu_device, cpu);
450 if (td->evtdev->next_event.tv64 <= now.tv64)
6b954823 451 cpumask_set_cpu(cpu, to_cpumask(tmpmask));
cdc6f27d
TG
452 else if (td->evtdev->next_event.tv64 < next_event.tv64)
453 next_event.tv64 = td->evtdev->next_event.tv64;
79bf2bb3
TG
454 }
455
456 /*
cdc6f27d
TG
457 * Wakeup the cpus which have an expired event.
458 */
6b954823 459 tick_do_broadcast(to_cpumask(tmpmask));
cdc6f27d
TG
460
461 /*
462 * Two reasons for reprogram:
463 *
464 * - The global event did not expire any CPU local
465 * events. This happens in dyntick mode, as the maximum PIT
466 * delta is quite small.
467 *
468 * - There are pending events on sleeping CPUs which were not
469 * in the event mask
79bf2bb3 470 */
cdc6f27d 471 if (next_event.tv64 != KTIME_MAX) {
79bf2bb3 472 /*
cdc6f27d
TG
473 * Rearm the broadcast device. If event expired,
474 * repeat the above
79bf2bb3 475 */
cdc6f27d 476 if (tick_broadcast_set_event(next_event, 0))
79bf2bb3
TG
477 goto again;
478 }
b5f91da0 479 raw_spin_unlock(&tick_broadcast_lock);
79bf2bb3
TG
480}
481
482/*
483 * Powerstate information: The system enters/leaves a state, where
484 * affected devices might stop
485 */
486void tick_broadcast_oneshot_control(unsigned long reason)
487{
488 struct clock_event_device *bc, *dev;
489 struct tick_device *td;
490 unsigned long flags;
491 int cpu;
492
79bf2bb3
TG
493 /*
494 * Periodic mode does not care about the enter/exit of power
495 * states
496 */
497 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
7372b0b1 498 return;
79bf2bb3 499
7372b0b1
AK
500 /*
501 * We are called with preemtion disabled from the depth of the
502 * idle code, so we can't be moved away.
503 */
79bf2bb3
TG
504 cpu = smp_processor_id();
505 td = &per_cpu(tick_cpu_device, cpu);
506 dev = td->evtdev;
507
508 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
7372b0b1
AK
509 return;
510
511 bc = tick_broadcast_device.evtdev;
79bf2bb3 512
7372b0b1 513 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
79bf2bb3 514 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
6b954823
RR
515 if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
516 cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
79bf2bb3
TG
517 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
518 if (dev->next_event.tv64 < bc->next_event.tv64)
519 tick_broadcast_set_event(dev->next_event, 1);
520 }
521 } else {
6b954823
RR
522 if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
523 cpumask_clear_cpu(cpu,
524 tick_get_broadcast_oneshot_mask());
79bf2bb3
TG
525 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
526 if (dev->next_event.tv64 != KTIME_MAX)
527 tick_program_event(dev->next_event, 1);
528 }
529 }
b5f91da0 530 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
79bf2bb3
TG
531}
532
5590a536
TG
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
542
6b954823
RR
543static void tick_broadcast_init_next_event(struct cpumask *mask,
544 ktime_t expires)
7300711e
TG
545{
546 struct tick_device *td;
547 int cpu;
548
5db0e1e9 549 for_each_cpu(cpu, mask) {
7300711e
TG
550 td = &per_cpu(tick_cpu_device, cpu);
551 if (td->evtdev)
552 td->evtdev->next_event = expires;
553 }
554}
555
79bf2bb3 556/**
8dce39c2 557 * tick_broadcast_setup_oneshot - setup the broadcast device
79bf2bb3
TG
558 */
559void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
560{
07f4beb0
TG
561 int cpu = smp_processor_id();
562
9c17bcda
TG
563 /* Set it up only once ! */
564 if (bc->event_handler != tick_handle_oneshot_broadcast) {
7300711e 565 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
7300711e 566
9c17bcda 567 bc->event_handler = tick_handle_oneshot_broadcast;
7300711e
TG
568
569 /* Take the do_timer update */
570 tick_do_timer_cpu = cpu;
571
572 /*
573 * We must be careful here. There might be other CPUs
574 * waiting for periodic broadcast. We need to set the
575 * oneshot_mask bits for those and program the
576 * broadcast device to fire.
577 */
6b954823
RR
578 cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
579 cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
580 cpumask_or(tick_get_broadcast_oneshot_mask(),
581 tick_get_broadcast_oneshot_mask(),
582 to_cpumask(tmpmask));
583
584 if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
b435092f 585 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
6b954823
RR
586 tick_broadcast_init_next_event(to_cpumask(tmpmask),
587 tick_next_period);
7300711e
TG
588 tick_broadcast_set_event(tick_next_period, 1);
589 } else
590 bc->next_event.tv64 = KTIME_MAX;
07f4beb0
TG
591 } else {
592 /*
593 * The first cpu which switches to oneshot mode sets
594 * the bit for all other cpus which are in the general
595 * (periodic) broadcast mask. So the bit is set and
596 * would prevent the first broadcast enter after this
597 * to program the bc device.
598 */
599 tick_broadcast_clear_oneshot(cpu);
9c17bcda 600 }
79bf2bb3
TG
601}
602
603/*
604 * Select oneshot operating mode for the broadcast device
605 */
606void tick_broadcast_switch_to_oneshot(void)
607{
608 struct clock_event_device *bc;
609 unsigned long flags;
610
b5f91da0 611 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
fa4da365
SS
612
613 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
79bf2bb3
TG
614 bc = tick_broadcast_device.evtdev;
615 if (bc)
616 tick_broadcast_setup_oneshot(bc);
77b0d60c 617
b5f91da0 618 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
79bf2bb3
TG
619}
620
621
622/*
623 * Remove a dead CPU from broadcasting
624 */
625void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
626{
79bf2bb3
TG
627 unsigned long flags;
628 unsigned int cpu = *cpup;
629
b5f91da0 630 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
79bf2bb3 631
31d9b393
TG
632 /*
633 * Clear the broadcast mask flag for the dead cpu, but do not
634 * stop the broadcast device!
635 */
6b954823 636 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
79bf2bb3 637
b5f91da0 638 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
79bf2bb3
TG
639}
640
27ce4cb4
TG
641/*
642 * Check, whether the broadcast device is in one shot mode
643 */
644int tick_broadcast_oneshot_active(void)
645{
646 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
647}
648
3a142a06
TG
649/*
650 * Check whether the broadcast device supports oneshot.
651 */
652bool tick_broadcast_oneshot_available(void)
653{
654 struct clock_event_device *bc = tick_broadcast_device.evtdev;
655
656 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
657}
658
79bf2bb3 659#endif
This page took 0.429196 seconds and 5 git commands to generate.