/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

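/*
 * The IPI numbers here are a software convention: each value is handed
 * to the platform's smp_cross_call() implementation on the sending side
 * and demultiplexed again in handle_IPI() on the receiving CPU, so they
 * only need to agree with the low-level IRQ entry code that calls
 * handle_IPI(), not with any hardware numbering.
 */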
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
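
/*
 * smp_set_ops() is normally called once during setup_arch(), usually with
 * the smp_operations supplied by the machine descriptor. As a sketch
 * (foo_smp_ops and the board name are hypothetical):
 *
 *	DT_MACHINE_START(FOO, "Foo board (Device Tree)")
 *		.smp		= smp_ops(foo_smp_ops),
 *		...
 *	MACHINE_END
 */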

/*
 * With LPAE enabled, the physical address of the page tables may not fit
 * in 32 bits, so the pgd is handed to the secondary startup code packed
 * as a shifted physical address.
 */
static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgdir = virt_to_phys(pgd);
	BUG_ON(pgdir & ARCH_PGD_MASK);
	return pgdir >> ARCH_PGD_SHIFT;
}

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

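/*
 * Note the convention: platform_cpu_kill() returns 1 when the CPU was
 * successfully killed and 0 on failure, unlike most kernel interfaces
 * which would return 0 for success; see the !platform_cpu_kill() check
 * in __cpu_die() below.
 */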
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 * The stack pointer is reset to the top of the idle thread's
	 * stack (the same THREAD_SIZE - 8 offset used when the thread
	 * was first set up) and the frame pointer is cleared.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time.  A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!smp_cross_call)
		smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

bc28248e
RK
525{
526 evt->name = "dummy_timer";
527 evt->features = CLOCK_EVT_FEAT_ONESHOT |
528 CLOCK_EVT_FEAT_PERIODIC |
529 CLOCK_EVT_FEAT_DUMMY;
f7db706b 530 evt->rating = 100;
bc28248e
RK
531 evt->mult = 1;
532 evt->set_mode = broadcast_timer_set_mode;
bc28248e
RK
533
534 clockevents_register_device(evt);
535}
bc28248e 536
static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

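/*
 * Pick a timer for this CPU: use the driver registered through
 * local_timer_register() if present and able to set this CPU up,
 * otherwise fall back to the dummy broadcast-driven clockevent above.
 */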
static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

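/*
 * On CPUs whose udelay()/BogoMIPS loop is clocked by the CPU frequency
 * (i.e. no CPUFREQ_CONST_LOOPS), loops_per_jiffy must be rescaled on
 * every frequency transition:
 *
 *	lpj_new = lpj_ref * freq_new / freq_ref
 *
 * which is what the cpufreq_scale() calls below compute, using reference
 * values captured the first time a transition is seen.
 */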
static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif