/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
        struct clock_event_device *ce;
        int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
                        bool ismax)
{
        u64 clc = (u64) latch << evt->shift;
        u64 rnd;

        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }
        rnd = (u64) evt->mult - 1;

        /*
         * Upper bound sanity check. If the backwards conversion is
         * not equal to latch, we know that the above shift overflowed.
         */
        if ((clc >> evt->shift) != (u64)latch)
                clc = ~0ULL;

        /*
         * Scaled math oddities:
         *
         * For mult <= (1 << shift) we can safely add mult - 1 to
         * prevent integer rounding loss. So the backwards conversion
         * from nsec to device ticks will be correct.
         *
         * For mult > (1 << shift), i.e. device frequency is > 1GHz, we
         * need to be careful. Adding mult - 1 will result in a value
         * which when converted back to device ticks can be larger
         * than latch by up to (mult - 1) >> shift. For the min_delta
         * calculation we still want to apply this in order to stay
         * above the minimum device ticks limit. For the upper limit
         * we would end up with a latch value larger than the upper
         * limit of the device, so we omit the add to stay below the
         * device upper boundary.
         *
         * Also omit the add if it would overflow the u64 boundary.
         */
        if ((~0ULL - clc > rnd) &&
            (!ismax || evt->mult <= (1ULL << evt->shift)))
                clc += rnd;

        do_div(clc, evt->mult);

        /* Deltas less than 1usec are pointless noise */
        return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch: value to convert
 * @evt: pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
        return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

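/*
 * Illustrative example (assumes a 1 MHz event device; the actual
 * mult/shift pair comes from clockevents_calc_mult_shift(), which picks
 * mult / 2^shift ~= freq / NSEC_PER_SEC, i.e. device ticks per nsec):
 * cev_delta2ns() computes ns ~= (latch << shift) / mult, which is
 * latch * NSEC_PER_SEC / freq. At 1 MHz one tick is 1000 ns, so a latch
 * of 123 ticks converts to roughly 123000 ns; results below 1 usec are
 * clamped to 1000 ns.
 */
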
static int __clockevents_switch_state(struct clock_event_device *dev,
                                      enum clock_event_state state)
{
        /* Transition with legacy set_mode() callback */
        if (dev->set_mode) {
                /* Legacy callback doesn't support new modes */
                if (state > CLOCK_EVT_STATE_ONESHOT)
                        return -ENOSYS;
                /*
                 * 'clock_event_state' and 'clock_event_mode' have a 1-to-1
                 * mapping until *_ONESHOT, and so a simple cast will work.
                 */
                dev->set_mode((enum clock_event_mode)state, dev);
                dev->mode = (enum clock_event_mode)state;
                return 0;
        }

        if (dev->features & CLOCK_EVT_FEAT_DUMMY)
                return 0;

        /* Transition with new state-specific callbacks */
        switch (state) {
        case CLOCK_EVT_STATE_DETACHED:
                /* The clockevent device is getting replaced. Shut it down. */

        case CLOCK_EVT_STATE_SHUTDOWN:
                return dev->set_state_shutdown(dev);

        case CLOCK_EVT_STATE_PERIODIC:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
                        return -ENOSYS;
                return dev->set_state_periodic(dev);

        case CLOCK_EVT_STATE_ONESHOT:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                        return -ENOSYS;
                return dev->set_state_oneshot(dev);

        case CLOCK_EVT_STATE_ONESHOT_STOPPED:
                /* Core internal bug */
                if (WARN_ONCE(!clockevent_state_oneshot(dev),
                              "Current state: %d\n",
                              clockevent_get_state(dev)))
                        return -EINVAL;

                if (dev->set_state_oneshot_stopped)
                        return dev->set_state_oneshot_stopped(dev);
                else
                        return -ENOSYS;

        default:
                return -ENOSYS;
        }
}

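/*
 * For reference (inferred from the cast in the legacy branch above): the
 * states up to *_ONESHOT map 1-to-1 onto the legacy modes, i.e.
 * DETACHED/SHUTDOWN/PERIODIC/ONESHOT correspond to
 * CLOCK_EVT_MODE_UNUSED/SHUTDOWN/PERIODIC/ONESHOT respectively.
 */
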
/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev: device to modify
 * @state: new state
 *
 * Must be called with interrupts disabled!
 */
void clockevents_switch_state(struct clock_event_device *dev,
                              enum clock_event_state state)
{
        if (clockevent_get_state(dev) != state) {
                if (__clockevents_switch_state(dev, state))
                        return;

                clockevent_set_state(dev, state);

                /*
                 * A nsec2cyc multiplier of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
                if (clockevent_state_oneshot(dev)) {
                        if (unlikely(!dev->mult)) {
                                dev->mult = 1;
                                WARN_ON(1);
                        }
                }
        }
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev: device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev: device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
        int ret = 0;

        if (dev->set_mode) {
                dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
                dev->mode = CLOCK_EVT_MODE_RESUME;
        } else if (dev->tick_resume) {
                ret = dev->tick_resume(dev);
        }

        return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev: device whose minimum delta is to be increased
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
                printk_deferred(KERN_WARNING
                                "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }

        if (dev->min_delta_ns < 5000)
                dev->min_delta_ns = 5000;
        else
                dev->min_delta_ns += dev->min_delta_ns >> 1;

        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;

        printk_deferred(KERN_WARNING
                        "CE: %s increased min_delta_ns to %llu nsec\n",
                        dev->name ? dev->name : "?",
                        (unsigned long long) dev->min_delta_ns);
        return 0;
}

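/*
 * Illustrative progression (assuming HZ=250, i.e. MIN_DELTA_LIMIT of
 * 4000000 ns): after repeated programming failures min_delta_ns starts
 * at 5000 ns and grows by 50% per further failure,
 * 5000 -> 7500 -> 11250 -> ..., until the one-jiffy limit is reached,
 * at which point reprogramming gives up with -ETIME.
 */
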
/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;
        int i;

        for (i = 0;;) {
                delta = dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);

                if (clockevent_state_shutdown(dev))
                        return 0;

                dev->retries++;
                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
                if (dev->set_next_event((unsigned long) clc, dev) == 0)
                        return 0;

                if (++i > 2) {
                        /*
                         * We tried 3 times to program the device with the
                         * given min_delta_ns. Try to increase the minimum
                         * delta; if that fails as well, get out of here.
                         */
                        if (clockevents_increase_min_delta(dev))
                                return -ETIME;
                        i = 0;
                }
        }
}

#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;

        delta = dev->min_delta_ns;
        dev->next_event = ktime_add_ns(ktime_get(), delta);

        if (clockevent_state_shutdown(dev))
                return 0;

        dev->retries++;
        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev: device to program
 * @expires: absolute expiry time (monotonic clock)
 * @force: program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              bool force)
{
        unsigned long long clc;
        int64_t delta;
        int rc;

        if (unlikely(expires.tv64 < 0)) {
                WARN_ON_ONCE(1);
                return -ETIME;
        }

        dev->next_event = expires;

        if (clockevent_state_shutdown(dev))
                return 0;

        /* We must be in ONESHOT state here */
        WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
                  clockevent_get_state(dev));

        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);

        delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
        if (delta <= 0)
                return force ? clockevents_program_min_delta(dev) : -ETIME;

        delta = min(delta, (int64_t) dev->max_delta_ns);
        delta = max(delta, (int64_t) dev->min_delta_ns);

        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        rc = dev->set_next_event((unsigned long) clc, dev);

        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

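/*
 * Illustrative example (again assuming a 1 MHz device): a request to
 * fire in 500 us gives delta = 500000 ns, which is clamped to the
 * device's [min_delta_ns, max_delta_ns] window and then converted to
 * clc = delta * mult >> shift ~= 500 device ticks for set_next_event().
 */
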
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                tick_check_new_device(dev);
        }
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
        struct clock_event_device *dev, *newdev = NULL;

        list_for_each_entry(dev, &clockevent_devices, list) {
                if (dev == ced || !clockevent_state_detached(dev))
                        continue;

                if (!tick_check_replacement(newdev, dev))
                        continue;

                if (!try_module_get(dev->owner))
                        continue;

                if (newdev)
                        module_put(newdev->owner);
                newdev = dev;
        }
        if (newdev) {
                tick_install_replacement(newdev);
                list_del_init(&ced->list);
        }
        return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
        /* Fast track. Device is unused */
        if (clockevent_state_detached(ced)) {
                list_del_init(&ced->list);
                return 0;
        }

        return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
        struct ce_unbind *cu = arg;
        int res;

        raw_spin_lock(&clockevents_lock);
        res = __clockevents_try_unbind(cu->ce, smp_processor_id());
        if (res == -EAGAIN)
                res = clockevents_replace(cu->ce);
        cu->res = res;
        raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues an SMP function call to unbind a per-cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
        struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

        smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
        return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
        int ret;

        mutex_lock(&clockevents_mutex);
        ret = clockevents_unbind(ced, cpu);
        mutex_unlock(&clockevents_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/* Sanity check of state transition callbacks */
static int clockevents_sanity_check(struct clock_event_device *dev)
{
        /* Legacy set_mode() callback */
        if (dev->set_mode) {
                /* We shouldn't be supporting new modes now */
                WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
                        dev->set_state_shutdown || dev->tick_resume ||
                        dev->set_state_oneshot_stopped);

                BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
                return 0;
        }

        if (dev->features & CLOCK_EVT_FEAT_DUMMY)
                return 0;

        /* New state-specific callbacks */
        if (!dev->set_state_shutdown)
                return -EINVAL;

        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
            !dev->set_state_periodic)
                return -EINVAL;

        if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
            !dev->set_state_oneshot)
                return -EINVAL;

        return 0;
}

/**
 * clockevents_register_device - register a clock event device
 * @dev: device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(clockevents_sanity_check(dev));

        /* Initialize state to DETACHED */
        clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        tick_check_new_device(dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
        u64 sec;

        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return;

        /*
         * Calculate the maximum number of seconds we can sleep. Limit
         * to 10 minutes for hardware which can program more than
         * 32-bit ticks, so we still get reasonable conversion values.
         */
        sec = dev->max_delta_ticks;
        do_div(sec, freq);
        if (!sec)
                sec = 1;
        else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
                sec = 600;

        clockevents_calc_mult_shift(dev, freq, sec);
        dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
        dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}

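/*
 * Illustrative example (hypothetical hardware): a 10 MHz timer with a
 * 32-bit counter has max_delta_ticks = 0xffffffff, so sec works out to
 * roughly 429. clockevents_calc_mult_shift() then picks a mult/shift
 * pair accurate over that range, and min/max_delta_ns are derived from
 * the configured tick limits via cev_delta2ns().
 */
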
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev: device to register
 * @freq: The clock frequency
 * @min_delta: The minimum clock ticks to program in oneshot mode
 * @max_delta: The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
                                     u32 freq, unsigned long min_delta,
                                     unsigned long max_delta)
{
        dev->min_delta_ticks = min_delta;
        dev->max_delta_ticks = max_delta;
        clockevents_config(dev, freq);
        clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);

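/*
 * Minimal usage sketch (hypothetical driver, names and limits are
 * placeholders): a timer driver typically fills in its callbacks and
 * calls the helper above from its probe/init path, e.g.
 *
 *      static struct clock_event_device foo_clkevt = {
 *              .name                   = "foo-timer",
 *              .features               = CLOCK_EVT_FEAT_ONESHOT,
 *              .rating                 = 300,
 *              .set_next_event         = foo_set_next_event,
 *              .set_state_shutdown     = foo_shutdown,
 *              .set_state_oneshot      = foo_set_oneshot,
 *      };
 *
 *      foo_clkevt.cpumask = cpumask_of(smp_processor_id());
 *      clockevents_config_and_register(&foo_clkevt, rate, 0xf, 0x7fffffff);
 */
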
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        clockevents_config(dev, freq);

        if (clockevent_state_oneshot(dev))
                return clockevents_program_event(dev, dev->next_event, false);

        if (clockevent_state_periodic(dev))
                return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

        return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev: device to modify
 * @freq: new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = tick_broadcast_update_freq(dev, freq);
        if (ret == -ENODEV)
                ret = __clockevents_update_freq(dev, freq);
        local_irq_restore(flags);
        return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old: device to release (can be NULL)
 * @new: device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                module_put(old->owner);
                clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(!clockevent_state_detached(new));
                clockevents_shutdown(new);
        }
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
        struct clock_event_device *dev;

        list_for_each_entry_reverse(dev, &clockevent_devices, list)
                if (dev->suspend && !clockevent_state_detached(dev))
                        dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
        struct clock_event_device *dev;

        list_for_each_entry(dev, &clockevent_devices, list)
                if (dev->resume && !clockevent_state_detached(dev))
                        dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 */
void tick_cleanup_dead_cpu(int cpu)
{
        struct clock_event_device *dev, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        tick_shutdown_broadcast_oneshot(cpu);
        tick_shutdown_broadcast(cpu);
        tick_shutdown(cpu);
        /*
         * Unregister the clock event devices which were
         * released from the users in the notify chain.
         */
        list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
                list_del(&dev->list);
        /*
         * Now check whether the CPU has left unused per cpu devices
         */
        list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                if (cpumask_test_cpu(cpu, dev->cpumask) &&
                    cpumask_weight(dev->cpumask) == 1 &&
                    !tick_is_broadcast_device(dev)) {
                        BUG_ON(!clockevent_state_detached(dev));
                        list_del(&dev->list);
                }
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
        .name = "clockevents",
        .dev_name = "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct tick_device *td;
        ssize_t count = 0;

        raw_spin_lock_irq(&clockevents_lock);
        td = tick_get_tick_dev(dev);
        if (td && td->evtdev)
                count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
        raw_spin_unlock_irq(&clockevents_lock);
        return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        char name[CS_NAME_LEN];
        ssize_t ret = sysfs_get_uname(buf, name, count);
        struct clock_event_device *ce;

        if (ret < 0)
                return ret;

        ret = -ENODEV;
        mutex_lock(&clockevents_mutex);
        raw_spin_lock_irq(&clockevents_lock);
        list_for_each_entry(ce, &clockevent_devices, list) {
                if (!strcmp(ce->name, name)) {
                        ret = __clockevents_try_unbind(ce, dev->id);
                        break;
                }
        }
        raw_spin_unlock_irq(&clockevents_lock);
        /*
         * We hold clockevents_mutex, so ce can't go away
         */
        if (ret == -EAGAIN)
                ret = clockevents_unbind(ce, dev->id);
        mutex_unlock(&clockevents_mutex);
        return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);

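/*
 * Illustrative sysfs usage (paths derived from the subsystem and device
 * names above, assuming CPU 0):
 *
 *      cat /sys/devices/system/clockevents/clockevent0/current_device
 *      echo <device name> > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * Unbinding the CPU's active tick device makes __clockevents_unbind()
 * fall back to clockevents_replace(), which installs the best remaining
 * candidate.
 */
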
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
        .init_name = "broadcast",
        .id = 0,
        .bus = &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return dev == &tick_bc_dev ? tick_get_broadcast_device() :
                &per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
        int err = device_register(&tick_bc_dev);

        if (!err)
                err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
        return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct device *dev = &per_cpu(tick_percpu_dev, cpu);
                int err;

                dev->id = cpu;
                dev->bus = &clockevents_subsys;
                err = device_register(dev);
                if (!err)
                        err = device_create_file(dev, &dev_attr_current_device);
                if (!err)
                        err = device_create_file(dev, &dev_attr_unbind_device);
                if (err)
                        return err;
        }
        return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
        int err = subsys_system_register(&clockevents_subsys, NULL);

        if (!err)
                err = tick_init_sysfs();
        return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */