/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
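/* Handshake between the master (smp_synchronize_one_tick) and the
 * slave (smp_synchronize_tick_client) goes through the go[] array;
 * the MASTER and SLAVE slots sit a cache line apart to avoid false
 * sharing.  get_delta() runs on the slave: it brackets one master
 * timestamp exchange between two local %tick reads, keeps the
 * iteration with the smallest round-trip window, and returns the
 * midpoint of that window minus the master's timestamp.  Assuming a
 * reasonably symmetric exchange latency, that difference estimates
 * the tick skew, which the caller cancels via tick_ops->add_tick().
 */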
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}
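/* Start a CPU via the LDOM hypervisor: build an hvtramp_descr holding
 * the target's cpu id, thread register, fault status area, and one
 * entry per locked 4MB kernel TTE, then point sun4v_cpu_start() at the
 * hv_cpu_startup trampoline with the descriptor's physical address as
 * argument.  The descriptor is handed back through *descrp so that the
 * caller can kfree() it once the new CPU has called in.
 */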
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}
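/* Cross-call (xcall) delivery, per CPU type.  A mondo is three 64-bit
 * words: data0 encodes the handler PC (and, for TLB flushes, the MMU
 * context in the upper 32 bits), data1/data2 are its arguments.
 * Spitfire dispatches to one target at a time through the UDB
 * interrupt registers, Cheetah can pipeline up to 32 dispatches at
 * once, and sun4v hands the whole CPU list to the hypervisor.
 */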
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows us to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
/* Multi-cpu list version. */
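/* The hypervisor marks each successfully-notified entry in the CPU
 * list with 0xffff, so on HV_EWOULDBLOCK the same list can simply be
 * resubmitted; the retry counter is only advanced when a resubmission
 * makes no forward progress at all.
 */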
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}
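/* Called via smp_call_function_many() when an mm's TSB is replaced
 * (e.g. grown); every CPU running that mm must reload its TSB
 * registers.  Note the check below uses the trap block's PGD physical
 * address rather than current->active_mm, which cannot be sampled
 * reliably from cross-call context.
 */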
static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
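/* D-cache flushing.  Pre-sun4v chips may hold virtually-indexed
 * aliases of a page, so before a mapping change the page may need to
 * be flushed on every CPU that could have it cached; spitfire also
 * wants an I-cache flush for mapped pages.  On sun4v there is no
 * aliasing problem to handle, and these routines return immediately
 * when tlb_type == hypervisor.
 */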
static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));
	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
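/* smp_capture()/smp_release() corral every other CPU into a spin loop
 * (the "jail") while the caller needs exclusive use of the machine,
 * e.g. while the PROM is active.  Typical (nestable) usage:
 *
 *	smp_capture();
 *	... work that must not race with the other CPUs ...
 *	smp_release();
 *
 * Only the outermost capture sends the xcall; nesting is tracked by
 * smp_capture_depth, and captured CPUs check in and out through
 * smp_capture_registry.
 */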
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}
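/* Build the topology masks from the per-cpu core_id/proc_id values
 * (filled in from the OF/machine-description CPU data): CPUs sharing a
 * core_id become cpu_core_map peers, CPUs sharing a proc_id become
 * cpu_sibling_map peers.  A core_id of 0 or a proc_id of -1 means no
 * topology information, and the CPU becomes its own sole sibling.
 */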
void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
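/* CPU hotplug teardown: __cpu_disable() runs on the dying CPU to drop
 * it from the topology maps and steer interrupts away; cpu_play_dead()
 * is the last code it executes, unhooking the sun4v mondo queues and
 * spinning with interrupts disabled; __cpu_die() runs on a surviving
 * CPU, waiting for the victim to leave smp_commenced_mask and then,
 * under LDOMs, asking the hypervisor to stop it for good.
 */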
#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
	pcr_arch_init();
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}
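/* First-chunk setup for the percpu allocator: try the embed allocator
 * first (4MB units, NUMA-aware via pcpu_cpu_distance), and fall back
 * to the page-at-a-time first chunk, which needs pcpu_populate_pte()
 * above to build kernel page tables for the percpu area.
 */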
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}