/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
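
/*
 * Note: the first IPI number is 2, so per-IPI statistics are indexed
 * by (ipinr - IPI_TIMER); see ipi_types[] and handle_IPI() below.
 * IPIs 0 and 1 are deliberately left free here - historically they
 * have been claimed for platform-specific purposes (an assumption
 * worth checking against the platform's interrupt controller code).
 */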

static DECLARE_COMPLETION(cpu_running);

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	int ret;

	/*
	 * Spawn the idle thread for this CPU if it does not already
	 * exist, and grab a pointer to its task struct so we can
	 * manipulate it.
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	} else {
		/*
		 * Since this idle thread is being re-used, call
		 * init_idle() to reinitialize the thread structure.
		 */
		init_idle(idle, cpu);
	}

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}
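
/*
 * __cpu_up() is not called directly; the generic hotplug core invokes
 * it from _cpu_up(), e.g. in response to cpu_up(1) from kernel code or
 *
 *	echo 1 > /sys/devices/system/cpu/cpu1/online
 *
 * from userspace.
 */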

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the CPU.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the VM mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	complete(&cpu_died);

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation. There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
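	/*
	 * The stack pointer used below is the top of the idle thread's
	 * stack: THREAD_SIZE - 8 is the same value as the
	 * THREAD_START_SP offset used for secondary_data.stack in
	 * __cpu_up().
	 */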
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk(KERN_INFO "CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Set up the per-CPU timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
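
/*
 * Example (illustrative, not part of this file): a platform registers
 * its cross-call method early in SMP bringup, typically from its
 * smp_init_cpus() hook. On GIC-based platforms of this era that is
 * usually
 *
 *	set_smp_cross_call(gic_raise_softirq);
 *
 * after which smp_cross_call() raises a software-generated interrupt
 * on the CPUs in the mask.
 */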

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
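
/*
 * The two hooks above are invoked by the generic cross-call code in
 * kernel/smp.c. For instance, a hypothetical caller doing
 *
 *	smp_call_function_single(1, my_func, my_data, 1);
 *
 * (my_func/my_data being placeholder names) ends up raising
 * IPI_CALL_FUNC_SINGLE here, which handle_IPI() below dispatches to
 * generic_smp_call_function_single_interrupt().
 */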

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_TIMER] = s
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif
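
/*
 * When a CPU has no usable local timer, the dummy clockevent below is
 * registered in its place. Its set_mode hook is deliberately empty:
 * the device never touches real hardware. The generic broadcast code
 * instead calls evt->broadcast (smp_timer_broadcast above) to send
 * IPI_TIMER to the target CPUs, and ipi_timer() then runs the
 * registered event handler on each of them.
 */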

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif
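
/*
 * Example (a minimal sketch, not taken from this tree): a local timer
 * driver registers its ops once at init time:
 *
 *	static struct local_timer_ops my_lt_ops __cpuinitdata = {
 *		.setup	= my_timer_setup,
 *		.stop	= my_timer_stop,
 *	};
 *
 *	err = local_timer_register(&my_lt_ops);
 *
 * my_timer_setup()/my_timer_stop() are hypothetical hooks that program
 * the per-CPU timer hardware; the in-tree TWD driver
 * (arch/arm/kernel/smp_twd.c) follows this pattern.
 */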

static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

#ifdef CONFIG_HOTPLUG_CPU
	platform_cpu_kill(cpu);
#endif

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

	switch (ipinr) {
	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		struct cpumask mask;
		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}