calibrate_delay() must be __cpuinit
/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed the modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before running
	   calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		      ((char*)hwrpb
		       + hwrpb->processor_offset
		       + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr...  */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for(cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
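
/* Illustrative note (an assumption about the usual BogoMIPS convention, not
   a statement from the original source): bogosum accumulates loops_per_jiffy
   over all online CPUs, so the figure printed above is roughly
   bogosum * HZ / 500000, i.e. the sum of each CPU's individual
   loops_per_jiffy * HZ / 500000 rating, split by the two integer divisions
   into whole and hundredths parts.  */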

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
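
/* Note: delivery is two-phase.  The operation bit is first posted in each
   target's ipi_data word, then wripir() raises the actual interprocessor
   interrupt; handle_ipi() on the target drains the posted bits with xchg().
   The mb() calls order the bit updates against the IPIR writes.  */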

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The slot is taken only if
   it is initially free (NULL).  If retry, spin until it becomes free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
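
#if 0
/* Illustrative sketch only, not part of the original file: roughly the same
   acquire-or-spin behaviour expressed with the generic cmpxchg() helper
   instead of the hand-written ldq_l/stq_c sequence above.  The function name
   pointer_lock_sketch is hypothetical.  */
static int
pointer_lock_sketch(void **lock, void *data, int retry)
{
	for (;;) {
		/* Succeed only if the slot is currently empty (NULL).  */
		if (cmpxchg(lock, NULL, data) == NULL)
			return 0;
		if (!retry)
			return -EBUSY;
		/* Spin until the current holder clears the slot.  */
		while (*lock)
			barrier();
	}
}
#endif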

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;	/* Isolate the lowest pending bit.  */
		ops &= ~which;
		which = __ffs(which);	/* Convert the bit to a message number.  */

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
		    {
			struct smp_call_struct *data;
			void (*func)(void *info);
			void *info;
			int wait;

			data = smp_call_function_data;
			func = data->func;
			info = data->info;
			wait = data->wait;

			/* Notify the sending CPU that the data has been
			   received, and execution is about to begin.  */
			mb();
			atomic_dec (&data->unstarted_count);

			/* At this point the structure may be gone unless
			   wait is true.  */
			(*func)(info);

			/* Notify the sending CPU that the task is done.  */
			mb();
			if (wait) atomic_dec (&data->unfinished_count);
			break;
		    }

		case IPI_CPU_STOP:
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */

int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
EXPORT_SYMBOL(smp_call_function_on_cpu);

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}
EXPORT_SYMBOL(smp_call_function);
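
#if 0
/* Illustrative sketch only, with hypothetical names, not part of the
   original file: a minimal caller of the smp_call_function() wrapper above.
   The callback must be fast and non-blocking, as documented earlier.  */
static void example_ipi_callback(void *info)
{
	/* Hypothetical per-CPU work would go here.  */
}

static void example_broadcast(void)
{
	/* retry=1: spin for the call-data slot; wait=1: block until all
	   other online CPUs have finished running the callback.  */
	if (smp_call_function(example_ipi_callback, NULL, 1, 1))
		printk(KERN_ERR "example_broadcast: smp_call_function failed\n");
}
#endif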

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icaches before
	   continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}