/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

int sparc64_multi_core __read_mostly;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i)
                seq_printf(m,
                           "Cpu%dClkTck\t: %016lx\n",
                           i, cpu_data(i).clock_tick);
}

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __devinit smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        __local_per_cpu_offset = __per_cpu_offset(cpuid);

        if (tlb_type == hypervisor)
                sun4v_ktsb_register();

        __flush_tlb_all();

        setup_sparc64_timer();

        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();

        local_irq_enable();

        callin_flag = 1;
        __asm__ __volatile__("membar #Sync\n\t"
                             "flush %%g6" : : : "memory");

        /* Clear this or we will die instantly when we
         * schedule back to this idler...
         */
        current_thread_info()->new_child = 0;

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                rmb();

        spin_lock(&call_lock);
        cpu_set(cpuid, cpu_online_map);
        spin_unlock(&call_lock);

        /* idle thread is expected to have preempt disabled */
        preempt_disable();
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER  0
#define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS      64      /* magic value */
#define NUM_ITERS       5       /* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC 0

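/* Runs on the slave: each iteration samples the local tick, signals
 * the master through go[MASTER], and reads the master's tick back
 * from go[SLAVE].  The iteration with the tightest round trip wins,
 * and the return value is the signed offset between the midpoint of
 * that interval and the master's timestamp.
 */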
static inline long get_delta (long *rt, long *master)
{
        unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
        unsigned long tcenter, t0, t1, tm;
        unsigned long i;

        for (i = 0; i < NUM_ITERS; i++) {
                t0 = tick_ops->get_tick();
                go[MASTER] = 1;
                membar_storeload();
                while (!(tm = go[SLAVE]))
                        rmb();
                go[SLAVE] = 0;
                wmb();
                t1 = tick_ops->get_tick();

                if (t1 - t0 < best_t1 - best_t0)
                        best_t0 = t0, best_t1 = t1, best_tm = tm;
        }

        *rt = best_t1 - best_t0;
        *master = best_tm - best_t0;

        /* average best_t0 and best_t1 without overflow: */
        tcenter = (best_t0/2 + best_t1/2);
        if (best_t0 % 2 + best_t1 % 2 == 2)
                tcenter++;
        return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
        long i, delta, adj, adjust_latency = 0, done = 0;
        unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
        struct {
                long rt;        /* roundtrip time */
                long master;    /* master's timestamp */
                long diff;      /* difference between midpoint and master's timestamp */
                long lat;       /* estimate of itc adjustment latency */
        } t[NUM_ROUNDS];
#endif

        go[MASTER] = 1;

        while (go[MASTER])
                rmb();

        local_irq_save(flags);
        {
                for (i = 0; i < NUM_ROUNDS; i++) {
                        delta = get_delta(&rt, &master_time_stamp);
                        if (delta == 0) {
                                done = 1;       /* let's lock on to this... */
                                bound = rt;
                        }

                        if (!done) {
                                if (i > 0) {
                                        adjust_latency += -delta;
                                        adj = -delta + adjust_latency/4;
                                } else
                                        adj = -delta;

                                tick_ops->add_tick(adj);
                        }
#if DEBUG_TICK_SYNC
                        t[i].rt = rt;
                        t[i].master = master_time_stamp;
                        t[i].diff = delta;
                        t[i].lat = adjust_latency/4;
#endif
                }
        }
        local_irq_restore(flags);

#if DEBUG_TICK_SYNC
        for (i = 0; i < NUM_ROUNDS; i++)
                printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
                       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

        printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
               "(last diff %ld cycles, maxerr %lu cycles)\n",
               smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
        unsigned long flags, i;

        go[MASTER] = 0;

        smp_start_sync_tick_client(cpu);

        /* wait for client to be ready */
        while (!go[MASTER])
                rmb();

        /* now let the client proceed into his loop */
        go[MASTER] = 0;
        membar_storeload();

        spin_lock_irqsave(&itc_sync_lock, flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                        while (!go[MASTER])
                                rmb();
                        go[MASTER] = 0;
                        wmb();
                        go[SLAVE] = tick_ops->get_tick();
                        membar_storeload();
                }
        }
        spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
        unsigned long val = (unsigned long) p;

        return kern_base + (val - KERNBASE);
}

static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
        extern unsigned long sparc64_ttable_tl0;
        extern unsigned long kern_locked_tte_data;
        extern int bigkernel;
        struct hvtramp_descr *hdesc;
        unsigned long trampoline_ra;
        struct trap_per_cpu *tb;
        u64 tte_vaddr, tte_data;
        unsigned long hv_err;

        hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
        if (!hdesc) {
                printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
                       "hvtramp_descr.\n");
                return;
        }

        hdesc->cpu = cpu;
        hdesc->num_mappings = (bigkernel ? 2 : 1);

        tb = &trap_block[cpu];
        tb->hdesc = hdesc;

        hdesc->fault_info_va = (unsigned long) &tb->fault_info;
        hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

        hdesc->thread_reg = thread_reg;

        tte_vaddr = (unsigned long) KERNBASE;
        tte_data = kern_locked_tte_data;

        hdesc->maps[0].vaddr = tte_vaddr;
        hdesc->maps[0].tte = tte_data;
        if (bigkernel) {
                tte_vaddr += 0x400000;
                tte_data += 0x400000;
                hdesc->maps[1].vaddr = tte_vaddr;
                hdesc->maps[1].tte = tte_data;
        }

        trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

        hv_err = sun4v_cpu_start(cpu, trampoline_ra,
                                 kimage_addr_to_ra(&sparc64_ttable_tl0),
                                 __pa(hdesc));
        if (hv_err)
                printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
                       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
        struct trap_per_cpu *tb = &trap_block[cpu];
        unsigned long entry =
                (unsigned long)(&sparc64_cpu_startup);
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
        int timeout, ret;

        p = fork_idle(cpu);
        if (IS_ERR(p))
                return PTR_ERR(p);
        callin_flag = 0;
        cpu_new_thread = task_thread_info(p);

        if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
                if (ldom_domaining_enabled)
                        ldom_startcpu_cpuid(cpu,
                                            (unsigned long) cpu_new_thread);
                else
#endif
                        prom_startcpu_cpuid(cpu, entry, cookie);
        } else {
                struct device_node *dp = of_find_node_by_cpuid(cpu);

                prom_startcpu(dp->node, entry, cookie);
        }

        for (timeout = 0; timeout < 50000; timeout++) {
                if (callin_flag)
                        break;
                udelay(100);
        }

        if (callin_flag) {
                ret = 0;
        } else {
                printk("Processor %d is stuck.\n", cpu);
                ret = -ENODEV;
        }
        cpu_new_thread = NULL;

        if (tb->hdesc) {
                kfree(tb->hdesc);
                tb->hdesc = NULL;
        }

        return ret;
}

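/* Send one interrupt vector (a "mondo") to a single cpu, retrying
 * the dispatch until the hardware reports it neither busy nor
 * NACK'ed.  A dispatch that stays busy too long is reported and
 * abandoned rather than wedging this cpu.
 */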
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
        u64 result, target;
        int stuck, tmp;

        if (this_is_starfire) {
                /* map to real upaid */
                cpu = (((cpu & 0x3c) << 1) |
                       ((cpu & 0x40) >> 4) |
                       (cpu & 0x3));
        }

        target = (cpu << 14) | 0x70;
again:
        /* Ok, this is the real Spitfire Errata #54.
         * One must read back from a UDB internal register
         * after writes to the UDB interrupt dispatch, but
         * before the membar Sync for that write.
         * So we use the high UDB control register (ASI 0x7f,
         * ADDR 0x20) for the dummy read. -DaveM
         */
        tmp = 0x40;
        __asm__ __volatile__(
        "wrpr   %1, %2, %%pstate\n\t"
        "stxa   %4, [%0] %3\n\t"
        "stxa   %5, [%0+%8] %3\n\t"
        "add    %0, %8, %0\n\t"
        "stxa   %6, [%0+%8] %3\n\t"
        "membar #Sync\n\t"
        "stxa   %%g0, [%7] %3\n\t"
        "membar #Sync\n\t"
        "mov    0x20, %%g1\n\t"
        "ldxa   [%%g1] 0x7f, %%g0\n\t"
        "membar #Sync"
        : "=r" (tmp)
        : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
          "r" (data0), "r" (data1), "r" (data2), "r" (target),
          "r" (0x10), "0" (tmp)
        : "g1");

        /* NOTE: PSTATE_IE is still clear. */
        stuck = 100000;
        do {
                __asm__ __volatile__("ldxa [%%g0] %1, %0"
                        : "=r" (result)
                        : "i" (ASI_INTR_DISPATCH_STAT));
                if (result == 0) {
                        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                             : : "r" (pstate));
                        return;
                }
                stuck -= 1;
                if (stuck == 0)
                        break;
        } while (result & 0x1);
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : : "r" (pstate));
        if (stuck == 0) {
                printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                       smp_processor_id(), result);
        } else {
                udelay(2);
                goto again;
        }
}

static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
        u64 pstate;
        int i;

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        for_each_cpu_mask(i, mask)
                spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
        u64 pstate, ver, busy_mask;
        int nack_busy_id, is_jbus, need_more;

        if (cpus_empty(mask))
                return;

        /* Unfortunately, someone at Sun had the brilliant idea to make the
         * busy/nack fields hard-coded by ITID number for this Ultra-III
         * derivative processor.
         */
        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
        is_jbus = ((ver >> 32) == __JALAPENO_ID ||
                   (ver >> 32) == __SERRANO_ID);

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
        need_more = 0;
        __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
                             : : "r" (pstate), "i" (PSTATE_IE));

        /* Setup the dispatch data registers. */
        __asm__ __volatile__("stxa %0, [%3] %6\n\t"
                             "stxa %1, [%4] %6\n\t"
                             "stxa %2, [%5] %6\n\t"
                             "membar #Sync\n\t"
                             : /* no outputs */
                             : "r" (data0), "r" (data1), "r" (data2),
                               "r" (0x40), "r" (0x50), "r" (0x60),
                               "i" (ASI_INTR_W));

        nack_busy_id = 0;
        busy_mask = 0;
        {
                int i;

                for_each_cpu_mask(i, mask) {
                        u64 target = (i << 14) | 0x70;

                        if (is_jbus) {
                                busy_mask |= (0x1UL << (i * 2));
                        } else {
                                target |= (nack_busy_id << 24);
                                busy_mask |= (0x1UL <<
                                              (nack_busy_id * 2));
                        }
                        __asm__ __volatile__(
                                "stxa   %%g0, [%0] %1\n\t"
                                "membar #Sync\n\t"
                                : /* no outputs */
                                : "r" (target), "i" (ASI_INTR_W));
                        nack_busy_id++;
                        if (nack_busy_id == 32) {
                                need_more = 1;
                                break;
                        }
                }
        }

        /* Now, poll for completion. */
        {
                u64 dispatch_stat, nack_mask;
                long stuck;

                stuck = 100000 * nack_busy_id;
                nack_mask = busy_mask << 1;
                do {
                        __asm__ __volatile__("ldxa [%%g0] %1, %0"
                                             : "=r" (dispatch_stat)
                                             : "i" (ASI_INTR_DISPATCH_STAT));
                        if (!(dispatch_stat & (busy_mask | nack_mask))) {
                                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                                     : : "r" (pstate));
                                if (unlikely(need_more)) {
                                        int i, cnt = 0;
                                        for_each_cpu_mask(i, mask) {
                                                cpu_clear(i, mask);
                                                cnt++;
                                                if (cnt == 32)
                                                        break;
                                        }
                                        goto retry;
                                }
                                return;
                        }
                        if (!--stuck)
                                break;
                } while (dispatch_stat & busy_mask);

                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : : "r" (pstate));

                if (dispatch_stat & busy_mask) {
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
                        printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                               smp_processor_id(), dispatch_stat);
                } else {
                        int i, this_busy_nack = 0;

                        /* Delay some random time with interrupts enabled
                         * to prevent deadlock.
                         */
                        udelay(2 * nack_busy_id);

                        /* Clear out the mask bits for cpus which did not
                         * NACK us.
                         */
                        for_each_cpu_mask(i, mask) {
                                u64 check_mask;

                                if (is_jbus)
                                        check_mask = (0x2UL << (2*i));
                                else
                                        check_mask = (0x2UL <<
                                                      this_busy_nack);
                                if ((dispatch_stat & check_mask) == 0)
                                        cpu_clear(i, mask);
                                this_busy_nack += 2;
                                if (this_busy_nack == 64)
                                        break;
                        }

                        goto retry;
                }
        }
}

/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
        struct trap_per_cpu *tb;
        u16 *cpu_list;
        u64 *mondo;
        cpumask_t error_mask;
        unsigned long flags, status;
        int cnt, retries, this_cpu, prev_sent, i;

        if (cpus_empty(mask))
                return;

        /* We have to do this whole thing with interrupts fully disabled.
         * Otherwise if we send an xcall from interrupt context it will
         * corrupt both our mondo block and cpu list state.
         *
         * One consequence of this is that we cannot use timeout mechanisms
         * that depend upon interrupts being delivered locally.  So, for
         * example, we cannot sample jiffies and expect it to advance.
         *
         * Fortunately, udelay() uses %stick/%tick so we can use that.
         */
        local_irq_save(flags);

        this_cpu = smp_processor_id();
        tb = &trap_block[this_cpu];

        mondo = __va(tb->cpu_mondo_block_pa);
        mondo[0] = data0;
        mondo[1] = data1;
        mondo[2] = data2;
        wmb();

        cpu_list = __va(tb->cpu_list_pa);

        /* Setup the initial cpu list.  */
        cnt = 0;
        for_each_cpu_mask(i, mask)
                cpu_list[cnt++] = i;

        cpus_clear(error_mask);
        retries = 0;
        prev_sent = 0;
        do {
                int forward_progress, n_sent;

                status = sun4v_cpu_mondo_send(cnt,
                                              tb->cpu_list_pa,
                                              tb->cpu_mondo_block_pa);

                /* HV_EOK means all cpus received the xcall, we're done.  */
                if (likely(status == HV_EOK))
                        break;

                /* First, see if we made any forward progress.
                 *
                 * The hypervisor indicates successful sends by setting
                 * cpu list entries to the value 0xffff.
                 */
                n_sent = 0;
                for (i = 0; i < cnt; i++) {
                        if (likely(cpu_list[i] == 0xffff))
                                n_sent++;
                }

                forward_progress = 0;
                if (n_sent > prev_sent)
                        forward_progress = 1;

                prev_sent = n_sent;

                /* If we get a HV_ECPUERROR, then one or more of the cpus
                 * in the list are in error state.  Use the cpu_state()
                 * hypervisor call to find out which cpus are in error state.
                 */
                if (unlikely(status == HV_ECPUERROR)) {
                        for (i = 0; i < cnt; i++) {
                                long err;
                                u16 cpu;

                                cpu = cpu_list[i];
                                if (cpu == 0xffff)
                                        continue;

                                err = sun4v_cpu_state(cpu);
                                if (err >= 0 &&
                                    err == HV_CPU_STATE_ERROR) {
                                        cpu_list[i] = 0xffff;
                                        cpu_set(cpu, error_mask);
                                }
                        }
                } else if (unlikely(status != HV_EWOULDBLOCK))
                        goto fatal_mondo_error;

                /* Don't bother rewriting the CPU list, just leave the
                 * 0xffff and non-0xffff entries in there and the
                 * hypervisor will do the right thing.
                 *
                 * Only advance timeout state if we didn't make any
                 * forward progress.
                 */
                if (unlikely(!forward_progress)) {
                        if (unlikely(++retries > 10000))
                                goto fatal_mondo_timeout;

                        /* Delay a little bit to let other cpus catch up
                         * on their cpu mondo queue work.
                         */
                        udelay(2 * cnt);
                }
        } while (1);

        local_irq_restore(flags);

        if (unlikely(!cpus_empty(error_mask)))
                goto fatal_mondo_cpu_error;

        return;

fatal_mondo_cpu_error:
        printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
               "were in error state\n",
               this_cpu);
        printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
        for_each_cpu_mask(i, error_mask)
                printk("%d ", i);
        printk("]\n");
        return;

fatal_mondo_timeout:
        local_irq_restore(flags);
        printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
               " progress after %d retries.\n",
               this_cpu, retries);
        goto dump_cpu_list_and_out;

fatal_mondo_error:
        local_irq_restore(flags);
        printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
               this_cpu, status);
        printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
               "mondo_block_pa(%lx)\n",
               this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
        printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
        for (i = 0; i < cnt; i++)
                printk("%u ", cpu_list[i]);
        printk("]\n");
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
        u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
        int this_cpu = get_cpu();

        cpus_and(mask, mask, cpu_online_map);
        cpu_clear(this_cpu, mask);

        if (tlb_type == spitfire)
                spitfire_xcall_deliver(data0, data1, data2, mask);
        else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                cheetah_xcall_deliver(data0, data1, data2, mask);
        else
                hypervisor_xcall_deliver(data0, data1, data2, mask);
        /* NOTE: Caller runs local copy on master. */

        put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);

        smp_cross_call_masked(&xcall_sync_tick,
                              0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
        smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t finished;
        int wait;
};

static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>>, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
                                  int nonatomic, int wait, cpumask_t mask)
{
        struct call_data_struct data;
        int cpus;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.finished, 0);
        data.wait = wait;

        spin_lock(&call_lock);

        cpu_clear(smp_processor_id(), mask);
        cpus = cpus_weight(mask);
        if (!cpus)
                goto out_unlock;

        call_data = &data;
        mb();

        smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

        /* Wait for response */
        while (atomic_read(&data.finished) != cpus)
                cpu_relax();

out_unlock:
        spin_unlock(&call_lock);

        return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
                      int nonatomic, int wait)
{
        return smp_call_function_mask(func, info, nonatomic, wait,
                                      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;

        clear_softint(1 << irq);
        if (call_data->wait) {
                /* let initiator proceed only after completion */
                func(info);
                atomic_inc(&call_data->finished);
        } else {
                /* let initiator proceed after getting data */
                atomic_inc(&call_data->finished);
                func(info);
        }
}

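/* tsb_sync() runs on every cpu in mm->cpu_vm_mask.  A cpu that is
 * currently executing 'mm' (detected via the PGD physical address
 * cached in its trap block) reloads the mm's TSB registers.
 */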
static void tsb_sync(void *info)
{
        struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
        struct mm_struct *mm = info;

        /* It is not valid to test "current->active_mm == mm" here.
         *
         * The value of "current" is not changed atomically with
         * switch_mm().  But that's OK, we just need to check the
         * current cpu's trap block PGD physical address.
         */
        if (tp->pgd_paddr == __pa(mm->pgd))
                tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
        smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
#else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page_address(page)));
#endif
}

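/* Flush the D-cache copy of 'page' held by 'cpu', cross-calling when
 * the owner is some other cpu.  sun4v chips need no software flush
 * here, hence the early return for tlb_type == hypervisor.
 */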
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

        this_cpu = get_cpu();

        if (cpu == this_cpu) {
                __local_flush_dcache_page(page);
        } else if (cpu_online(cpu)) {
                void *pg_addr = page_address(page);
                u64 data0;

                if (tlb_type == spitfire) {
                        data0 =
                                ((u64)&xcall_flush_dcache_page_spitfire);
                        if (page_mapping(page) != NULL)
                                data0 |= ((u64)1 << 32);
                        spitfire_xcall_deliver(data0,
                                               __pa(pg_addr),
                                               (u64) pg_addr,
                                               mask);
                } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
                        data0 =
                                ((u64)&xcall_flush_dcache_page_cheetah);
                        cheetah_xcall_deliver(data0,
                                              __pa(pg_addr),
                                              0, mask);
#endif
                }
#ifdef CONFIG_DEBUG_DCFLUSH
                atomic_inc(&dcpage_flushes_xcall);
#endif
        }

        put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
        void *pg_addr = page_address(page);
        cpumask_t mask = cpu_online_map;
        u64 data0;
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

        this_cpu = get_cpu();

        cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif
        if (cpus_empty(mask))
                goto flush_self;
        if (tlb_type == spitfire) {
                data0 = ((u64)&xcall_flush_dcache_page_spitfire);
                if (page_mapping(page) != NULL)
                        data0 |= ((u64)1 << 32);
                spitfire_xcall_deliver(data0,
                                       __pa(pg_addr),
                                       (u64) pg_addr,
                                       mask);
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
                data0 = ((u64)&xcall_flush_dcache_page_cheetah);
                cheetah_xcall_deliver(data0,
                                      __pa(pg_addr),
                                      0, mask);
#endif
        }
#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes_xcall);
#endif
flush_self:
        __local_flush_dcache_page(page);

        put_cpu();
}

static void __smp_receive_signal_mask(cpumask_t mask)
{
        smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);

        if (cpu_online(cpu))
                __smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
        struct mm_struct *mm;
        unsigned long flags;

        clear_softint(1 << irq);

        /* See if we need to allocate a new TLB context because
         * the version of the one we are using is now out of date.
         */
        mm = current->active_mm;
        if (unlikely(!mm || (mm == &init_mm)))
                return;

        spin_lock_irqsave(&mm->context.lock, flags);

        if (unlikely(!CTX_VALID(mm->context)))
                get_new_mmu_context(mm);

        spin_unlock_irqrestore(&mm->context.lock, flags);

        load_secondary_context(mm);
        __flush_tlb_mm(CTX_HWBITS(mm->context),
                       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
        smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void smp_report_regs(void)
{
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

1072 | /* We know that the window frames of the user have been flushed |
1073 | * to the stack before we get here because all callers of us | |
1074 | * are flush_tlb_*() routines, and these run after flush_cache_*() | |
1075 | * which performs the flushw. | |
1076 | * | |
1077 | * The SMP TLB coherency scheme we use works as follows: | |
1078 | * | |
1079 | * 1) mm->cpu_vm_mask is a bit mask of which cpus an address | |
1080 | * space has (potentially) executed on, this is the heuristic | |
1081 | * we use to avoid doing cross calls. | |
1082 | * | |
1083 | * Also, for flushing from kswapd and also for clones, we | |
1084 | * use cpu_vm_mask as the list of cpus to make run the TLB. | |
1085 | * | |
1086 | * 2) TLB context numbers are shared globally across all processors | |
1087 | * in the system, this allows us to play several games to avoid | |
1088 | * cross calls. | |
1089 | * | |
1090 | * One invariant is that when a cpu switches to a process, and | |
1091 | * that processes tsk->active_mm->cpu_vm_mask does not have the | |
1092 | * current cpu's bit set, that tlb context is flushed locally. | |
1093 | * | |
1094 | * If the address space is non-shared (ie. mm->count == 1) we avoid | |
1095 | * cross calls when we want to flush the currently running process's | |
1096 | * tlb state. This is done by clearing all cpu bits except the current | |
1097 | * processor's in current->active_mm->cpu_vm_mask and performing the | |
1098 | * flush locally only. This will force any subsequent cpus which run | |
1099 | * this task to flush the context from the local tlb if the process | |
1100 | * migrates to another cpu (again). | |
1101 | * | |
1102 | * 3) For shared address spaces (threads) and swapping we bite the | |
1103 | * bullet for most cases and perform the cross call (but only to | |
1104 | * the cpus listed in cpu_vm_mask). | |
1105 | * | |
1106 | * The performance gain from "optimizing" away the cross call for threads is | |
1107 | * questionable (in theory the big win for threads is the massive sharing of | |
1108 | * address space state across processors). | |
1109 | */ | |
62dbec78 DM |
1110 | |
1111 | /* This currently is only used by the hugetlb arch pre-fault | |
1112 | * hook on UltraSPARC-III+ and later when changing the pagesize | |
1113 | * bits of the context register for an address space. | |
1114 | */ | |
1da177e4 LT |
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        if (atomic_read(&mm->mm_users) == 1) {
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                goto local_flush_and_out;
        }

        smp_cross_call_masked(&xcall_flush_tlb_mm,
                              ctx, 0, 0,
                              mm->cpu_vm_mask);

local_flush_and_out:
        __flush_tlb_mm(ctx, SECONDARY_CONTEXT);

        put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
        else
                smp_cross_call_masked(&xcall_flush_tlb_pending,
                                      ctx, nr, (unsigned long) vaddrs,
                                      mm->cpu_vm_mask);

        __flush_tlb_pending(ctx, nr, vaddrs);

        put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        start &= PAGE_MASK;
        end = PAGE_ALIGN(end);
        if (start != end) {
                smp_cross_call(&xcall_flush_tlb_kernel_range,
                               0, start, end);

                __flush_tlb_kernel_range(start, end);
        }
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

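/* The outermost smp_capture() raises penguins_are_doing_time, sends
 * the xcall_capture cross call, and spins until every online cpu has
 * checked in to smp_capture_registry.  Nested captures only bump the
 * depth count; smp_release() unwinds one level at a time.
 */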
void smp_capture(void)
{
        int result = atomic_add_ret(1, &smp_capture_depth);

        if (result == 1) {
                int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Sending penguins to jail...",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 1;
                membar_storestore_loadstore();
                atomic_inc(&smp_capture_registry);
                smp_cross_call(&xcall_capture, 0, 0, 0);
                while (atomic_read(&smp_capture_registry) != ncpus)
                        rmb();
#ifdef CAPTURE_DEBUG
                printk("done\n");
#endif
        }
}

void smp_release(void)
{
        if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Giving pardon to "
                       "imprisoned penguins\n",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 0;
                membar_storeload_storestore();
                atomic_dec(&smp_capture_registry);
        }
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);

        preempt_disable();

        __asm__ __volatile__("flushw");
        prom_world(1);
        atomic_inc(&smp_capture_registry);
        membar_storeload_storestore();
        while (penguins_are_doing_time)
                rmb();
        atomic_dec(&smp_capture_registry);
        prom_world(0);

        preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __devinit smp_fill_in_sib_core_maps(void)
{
        unsigned int i;

        for_each_present_cpu(i) {
                unsigned int j;

                cpus_clear(cpu_core_map[i]);
                if (cpu_data(i).core_id == 0) {
                        cpu_set(i, cpu_core_map[i]);
                        continue;
                }

                for_each_present_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
                                cpu_set(j, cpu_core_map[i]);
                }
        }

        for_each_present_cpu(i) {
                unsigned int j;

                cpus_clear(per_cpu(cpu_sibling_map, i));
                if (cpu_data(i).proc_id == -1) {
                        cpu_set(i, per_cpu(cpu_sibling_map, i));
                        continue;
                }

                for_each_present_cpu(j) {
                        if (cpu_data(i).proc_id ==
                            cpu_data(j).proc_id)
                                cpu_set(j, per_cpu(cpu_sibling_map, i));
                }
        }
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        int ret = smp_boot_one_cpu(cpu);

        if (!ret) {
                cpu_set(cpu, smp_commenced_mask);
                while (!cpu_isset(cpu, cpu_online_map))
                        mb();
                if (!cpu_isset(cpu, cpu_online_map)) {
                        ret = -ENODEV;
                } else {
                        /* On SUN4V, writes to %tick and %stick are
                         * not allowed.
                         */
                        if (tlb_type != hypervisor)
                                smp_synchronize_one_tick(cpu);
                }
        }
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
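/* Final resting loop for an offlined cpu: unconfigure its sun4v
 * mondo queues, drop out of smp_commenced_mask, and spin with
 * interrupts disabled.  On LDOM kernels __cpu_die() later stops
 * the cpu for real via sun4v_cpu_stop().
 */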
void cpu_play_dead(void)
{
        int cpu = smp_processor_id();
        unsigned long pstate;

        idle_task_exit();

        if (tlb_type == hypervisor) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
                                tb->cpu_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
                                tb->dev_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
                                tb->resum_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
                                tb->nonresum_mondo_pa, 0);
        }

        cpu_clear(cpu, smp_commenced_mask);
        membar_safe("#Sync");

        local_irq_disable();

        __asm__ __volatile__(
                "rdpr   %%pstate, %0\n\t"
                "wrpr   %0, %1, %%pstate"
                : "=r" (pstate)
                : "i" (PSTATE_IE));

        while (1)
                barrier();
}

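/* Detach the dying cpu from the core and sibling maps, mark it
 * offline, and steer interrupts away from it.  The brief window
 * with interrupts enabled gives pending interrupts a chance to be
 * serviced before the cpu goes quiet.
 */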
int __cpu_disable(void)
{
        int cpu = smp_processor_id();
        cpuinfo_sparc *c;
        int i;

        for_each_cpu_mask(i, cpu_core_map[cpu])
                cpu_clear(cpu, cpu_core_map[i]);
        cpus_clear(cpu_core_map[cpu]);

        for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));

        c = &cpu_data(cpu);

        c->core_id = 0;
        c->proc_id = -1;

        spin_lock(&call_lock);
        cpu_clear(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        smp_wmb();

        /* Make sure no interrupts point to this cpu. */
        fixup_irqs();

        local_irq_enable();
        mdelay(1);
        local_irq_disable();

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (!cpu_isset(cpu, smp_commenced_mask))
                        break;
                msleep(100);
        }
        if (cpu_isset(cpu, smp_commenced_mask)) {
                printk(KERN_ERR "CPU %u didn't die...\n", cpu);
        } else {
#if defined(CONFIG_SUN_LDOMS)
                unsigned long hv_err;
                int limit = 100;

                do {
                        hv_err = sun4v_cpu_stop(cpu);
                        if (hv_err == HV_EOK) {
                                cpu_clear(cpu, cpu_present_map);
                                break;
                        }
                } while (--limit > 0);
                if (limit <= 0) {
                        printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
                               hv_err);
                }
#endif
        }
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
        smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

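/* The per-cpu area is sized up to the next power of two that holds
 * PERCPU_ENOUGH_ROOM, so a cpu's offset is simply
 * (cpu << __per_cpu_shift).  Each possible cpu gets its own copy of
 * the initial per-cpu data section, and the boot cpu's offset is
 * installed immediately.
 */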
unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

void __init real_setup_per_cpu_areas(void)
{
        unsigned long goal, size, i;
        char *ptr;

        /* Copy section for each CPU (we discard the original) */
        goal = PERCPU_ENOUGH_ROOM;

        __per_cpu_shift = PAGE_SHIFT;
        for (size = PAGE_SIZE; size < goal; size <<= 1UL)
                __per_cpu_shift++;

        ptr = alloc_bootmem_pages(size * NR_CPUS);

        __per_cpu_base = ptr - __per_cpu_start;

        for (i = 0; i < NR_CPUS; i++, ptr += size)
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

        /* Setup %g5 for the boot cpu. */
        __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}