/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Re-evaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
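		/*
		 * ticks counts the extra whole periods consumed by the
		 * slow path above; the increment accounts for the one
		 * period subtracted at the top. do_timer() advances
		 * jiffies_64 by that many ticks and updates the wall time.
		 */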
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
}

/*
 * Initialize last_jiffies_update, if necessary, and return the time of
 * the last jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled?
 */
static int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

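/*
 * Example: booting with "nohz=off" on the kernel command line disables
 * tickless mode; "nohz=on" enables it explicitly (the default).
 */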
__setup("nohz=", setup_tick_nohz);

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;

	cpumask_clear_cpu(cpu, nohz_cpu_mask);
	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

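/*
 * Account the just-ended idle period: add the time since idle_entrytime
 * to idle_sleeptime and mark the idle accounting as inactive.
 */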
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_lastupdate = now;
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

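/*
 * Start idle time accounting. If an idle period is already active, fold
 * the elapsed time into idle_sleeptime before restarting the period.
 */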
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now, delta;

	now = ktime_get();
	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_lastupdate = now;
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	}
	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

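/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store the update time in
 *
 * Returns the cumulative idle time of @cpu in microseconds, or -1 if
 * NOHZ is disabled.
 */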
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	if (!tick_nohz_enabled)
		return -1;

	if (ts->idle_active)
		*last_update_time = ktime_to_us(ts->idle_lastupdate);
	else
		*last_update_time = ktime_to_us(ktime_get());

	return ktime_to_us(ts->idle_sleeptime);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * A call to tick_nohz_start_idle() stops last_update_time from
	 * being updated. Thus, it must not be made when we are called
	 * from irq_exit() and the prior state was not idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	now = tick_nohz_start_idle(ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here, the jiffies might be stale and do_timer() never
	 * be invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	ts->inidle = 1;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;

		/*
		 * On SMP we really should only care for the CPU which
		 * has the do_timer duty assigned. All other CPUs can
		 * sleep as long as they want.
		 */
		if (cpu == tick_do_timer_cpu ||
		    tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			time_delta = timekeeping_max_deferment();
		else
			time_delta = KTIME_MAX;
	} while (read_seqretry(&xtime_lock, seq));

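	/*
	 * If RCU, printk or the architecture still need this CPU, limit
	 * the sleep to the next jiffy. Otherwise ask the timer wheel for
	 * the next pending timer.
	 */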
	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick if we are only one jiffy off
	 * or if the cpu is required for RCU.
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffy off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * Calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending, or at least one
		 * extremely far into the future (12 days for HZ=1000).
		 * In this case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
			expires = ktime_add_ns(last_update, time_delta);
		} else {
			expires.tv64 = KTIME_MAX;
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here, the
		 * jiffies might be stale and do_timer() never be
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;

		if (delta_jiffies > 1)
			cpumask_set_cpu(cpu, nohz_cpu_mask);

		/* Skip reprogramming the event if it hasn't changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * tick_nohz_stop_sched_tick() can be called several times
		 * before tick_nohz_restart_sched_tick() is called. This
		 * happens when interrupts arrive which do not cause a
		 * reschedule. In the first call we save the current tick
		 * time, so we can restart the scheduler tick in
		 * tick_nohz_restart_sched_tick().
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpumask_clear_cpu(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiry time is KTIME_MAX, we simply stop
		 * the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffy boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpumask_clear_cpu(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}

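/*
 * Restart the periodic tick: rearm sched_timer at the expiry time saved
 * in idle_tick and forward it until the expiry lands in the future.
 */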
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);

	while (1) {
		/* Forward the timer, so it expires in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(
				hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
}

/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();
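	/*
	 * Read the time only when we either have to finish the idle
	 * time accounting or restart the stopped tick.
	 */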
	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->inidle || !ts->tick_stopped) {
		ts->inidle = 0;
		local_irq_enable();
		return;
	}

	ts->inidle = 0;

	rcu_exit_nohz();

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
	cpumask_clear_cpu(cpu, nohz_cpu_mask);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it does only a one tick accounting.
	 * Enforce that this time is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one tick off. Do not randomly account a huge
	 * number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif

	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

	local_irq_enable();
}

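/*
 * Forward the per-cpu tick timer by one period and reprogram the clock
 * event device. Returns nonzero when the device could not be programmed
 * because the new expiry is already in the past.
 */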
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on completely idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

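	/*
	 * Reprogram the next tick. If the new expiry is already in the
	 * past, update jiffies and try again until it lands in the
	 * future.
	 */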
	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward() code with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();

	printk(KERN_INFO "Switched to NOHZ mode on CPU #%d\n",
	       smp_processor_id());
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different from tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
	/* Switch back to 2.6.27 behaviour */

	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	/*
	 * Do not touch the tick device when the next expiry is either
	 * already reached or less than or equal to the tick period away.
	 */
	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}

static inline void tick_check_nohz(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);
	if (ts->tick_stopped) {
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(cpu, now);
	}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* NO_HZ */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
	tick_check_oneshot_broadcast(cpu);
	tick_check_nohz(cpu);
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call update_process_times() and profile_tick() when
	 * we are not in irq context and have no valid regs pointer.
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on completely idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();
	u64 offset;

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
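	/*
	 * Stagger the per-cpu timers across half a tick period so the
	 * tick hrtimers of all CPUs do not expire at the same time.
	 */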
	offset = ktime_to_ns(tick_period) >> 1;
	do_div(offset, num_possible_cpus());
	offset *= smp_processor_id();
	hrtimer_add_expires_ns(&ts->sched_timer, offset);

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
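/*
 * Shut down the tick emulation timer of a cpu (if it was initialized)
 * and reset its nohz mode, so a later restart starts from scratch.
 */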
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}