| 1 | /* smp.c: Sparc64 SMP support. |
| 2 | * |
| 3 | * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) |
| 4 | */ |
| 5 | |
| 6 | #include <linux/export.h> |
| 7 | #include <linux/kernel.h> |
| 8 | #include <linux/sched.h> |
| 9 | #include <linux/mm.h> |
| 10 | #include <linux/pagemap.h> |
| 11 | #include <linux/threads.h> |
| 12 | #include <linux/smp.h> |
| 13 | #include <linux/interrupt.h> |
| 14 | #include <linux/kernel_stat.h> |
| 15 | #include <linux/delay.h> |
| 16 | #include <linux/init.h> |
| 17 | #include <linux/spinlock.h> |
| 18 | #include <linux/fs.h> |
| 19 | #include <linux/seq_file.h> |
| 20 | #include <linux/cache.h> |
| 21 | #include <linux/jiffies.h> |
| 22 | #include <linux/profile.h> |
| 23 | #include <linux/bootmem.h> |
| 24 | #include <linux/vmalloc.h> |
| 25 | #include <linux/ftrace.h> |
| 26 | #include <linux/cpu.h> |
| 27 | #include <linux/slab.h> |
| 28 | |
| 29 | #include <asm/head.h> |
| 30 | #include <asm/ptrace.h> |
| 31 | #include <linux/atomic.h> |
| 32 | #include <asm/tlbflush.h> |
| 33 | #include <asm/mmu_context.h> |
| 34 | #include <asm/cpudata.h> |
| 35 | #include <asm/hvtramp.h> |
| 36 | #include <asm/io.h> |
| 37 | #include <asm/timer.h> |
| 38 | |
| 39 | #include <asm/irq.h> |
| 40 | #include <asm/irq_regs.h> |
| 41 | #include <asm/page.h> |
| 42 | #include <asm/pgtable.h> |
| 43 | #include <asm/oplib.h> |
| 44 | #include <asm/uaccess.h> |
| 45 | #include <asm/starfire.h> |
| 46 | #include <asm/tlb.h> |
| 47 | #include <asm/sections.h> |
| 48 | #include <asm/prom.h> |
| 49 | #include <asm/mdesc.h> |
| 50 | #include <asm/ldc.h> |
| 51 | #include <asm/hypervisor.h> |
| 52 | #include <asm/pcr.h> |
| 53 | |
| 54 | #include "cpumap.h" |
| 55 | |
| 56 | int sparc64_multi_core __read_mostly; |
| 57 | |
| 58 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; |
| 59 | cpumask_t cpu_core_map[NR_CPUS] __read_mostly = |
| 60 | { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; |
| 61 | |
| 62 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
| 63 | EXPORT_SYMBOL(cpu_core_map); |
| 64 | |
| 65 | static cpumask_t smp_commenced_mask; |
| 66 | |
| 67 | void smp_info(struct seq_file *m) |
| 68 | { |
| 69 | int i; |
| 70 | |
| 71 | seq_printf(m, "State:\n"); |
| 72 | for_each_online_cpu(i) |
| 73 | seq_printf(m, "CPU%d:\t\tonline\n", i); |
| 74 | } |
| 75 | |
| 76 | void smp_bogo(struct seq_file *m) |
| 77 | { |
| 78 | int i; |
| 79 | |
| 80 | for_each_online_cpu(i) |
| 81 | seq_printf(m, |
| 82 | "Cpu%dClkTck\t: %016lx\n", |
| 83 | i, cpu_data(i).clock_tick); |
| 84 | } |
| 85 | |
| 86 | extern void setup_sparc64_timer(void); |
| 87 | |
| 88 | static volatile unsigned long callin_flag = 0; |
| 89 | |
| 90 | void __cpuinit smp_callin(void) |
| 91 | { |
| 92 | int cpuid = hard_smp_processor_id(); |
| 93 | |
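| | /* Set this cpu's per-cpu base offset (kept in %g5, see setup_per_cpu_areas()). */ |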
| 94 | __local_per_cpu_offset = __per_cpu_offset(cpuid); |
| 95 | |
| 96 | if (tlb_type == hypervisor) |
| 97 | sun4v_ktsb_register(); |
| 98 | |
| 99 | __flush_tlb_all(); |
| 100 | |
| 101 | setup_sparc64_timer(); |
| 102 | |
| 103 | if (cheetah_pcache_forced_on) |
| 104 | cheetah_enable_pcache(); |
| 105 | |
| 106 | callin_flag = 1; |
| 107 | __asm__ __volatile__("membar #Sync\n\t" |
| 108 | "flush %%g6" : : : "memory"); |
| 109 | |
| 110 | /* Clear this or we will die instantly when we |
| 111 | * schedule back to this idler... |
| 112 | */ |
| 113 | current_thread_info()->new_child = 0; |
| 114 | |
| 115 | /* Attach to the address space of init_task. */ |
| 116 | atomic_inc(&init_mm.mm_count); |
| 117 | current->active_mm = &init_mm; |
| 118 | |
| 119 | /* inform the notifiers about the new cpu */ |
| 120 | notify_cpu_starting(cpuid); |
| 121 | |
| 122 | while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) |
| 123 | rmb(); |
| 124 | |
| 125 | set_cpu_online(cpuid, true); |
| 126 | local_irq_enable(); |
| 127 | |
| 128 | /* idle thread is expected to have preempt disabled */ |
| 129 | preempt_disable(); |
| 130 | } |
| 131 | |
| 132 | void cpu_panic(void) |
| 133 | { |
| 134 | printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id()); |
| 135 | panic("SMP bolixed\n"); |
| 136 | } |
| 137 | |
| 138 | /* This tick register synchronization scheme is taken entirely from |
| 139 | * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. |
| 140 | * |
| 141 | * The only change I've made is to rework it so that the master |
| 142 | * initiates the synchronization instead of the slave. -DaveM |
| 143 | */ |
| 144 | |
| 145 | #define MASTER 0 |
| 146 | #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long)) |
| 147 | |
| 148 | #define NUM_ROUNDS 64 /* magic value */ |
| 149 | #define NUM_ITERS 5 /* likewise */ |
| 150 | |
| 151 | static DEFINE_SPINLOCK(itc_sync_lock); |
| 152 | static unsigned long go[SLAVE + 1]; |
| 153 | |
| 154 | #define DEBUG_TICK_SYNC 0 |
| 155 | |
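| | /* Slave side of one measurement batch: run NUM_ITERS handshakes with |
| | * the master, sampling our tick before and after each exchange and |
| | * recording the master's tick from go[SLAVE]. The pass with the |
| | * smallest round-trip wins; we return the estimated difference between |
| | * our tick (midpoint of the best pass) and the master's, and report |
| | * the best round-trip time and master timestamp via rt/master. |
| | */ |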
| 156 | static inline long get_delta (long *rt, long *master) |
| 157 | { |
| 158 | unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; |
| 159 | unsigned long tcenter, t0, t1, tm; |
| 160 | unsigned long i; |
| 161 | |
| 162 | for (i = 0; i < NUM_ITERS; i++) { |
| 163 | t0 = tick_ops->get_tick(); |
| 164 | go[MASTER] = 1; |
| 165 | membar_safe("#StoreLoad"); |
| 166 | while (!(tm = go[SLAVE])) |
| 167 | rmb(); |
| 168 | go[SLAVE] = 0; |
| 169 | wmb(); |
| 170 | t1 = tick_ops->get_tick(); |
| 171 | |
| 172 | if (t1 - t0 < best_t1 - best_t0) |
| 173 | best_t0 = t0, best_t1 = t1, best_tm = tm; |
| 174 | } |
| 175 | |
| 176 | *rt = best_t1 - best_t0; |
| 177 | *master = best_tm - best_t0; |
| 178 | |
| 179 | /* average best_t0 and best_t1 without overflow: */ |
| 180 | tcenter = (best_t0/2 + best_t1/2); |
| 181 | if (best_t0 % 2 + best_t1 % 2 == 2) |
| 182 | tcenter++; |
| 183 | return tcenter - best_tm; |
| 184 | } |
| 185 | |
| 186 | void smp_synchronize_tick_client(void) |
| 187 | { |
| 188 | long i, delta, adj, adjust_latency = 0, done = 0; |
| 189 | unsigned long flags, rt, master_time_stamp; |
| 190 | #if DEBUG_TICK_SYNC |
| 191 | struct { |
| 192 | long rt; /* roundtrip time */ |
| 193 | long master; /* master's timestamp */ |
| 194 | long diff; /* difference between midpoint and master's timestamp */ |
| 195 | long lat; /* estimate of itc adjustment latency */ |
| 196 | } t[NUM_ROUNDS]; |
| 197 | #endif |
| 198 | |
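| | /* Tell the master we are ready, then wait for it to kick off the measurement rounds. */ |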
| 199 | go[MASTER] = 1; |
| 200 | |
| 201 | while (go[MASTER]) |
| 202 | rmb(); |
| 203 | |
| 204 | local_irq_save(flags); |
| 205 | { |
| 206 | for (i = 0; i < NUM_ROUNDS; i++) { |
| 207 | delta = get_delta(&rt, &master_time_stamp); |
| 208 | if (delta == 0) |
| 209 | done = 1; /* let's lock on to this... */ |
| 210 | |
| 211 | if (!done) { |
| 212 | if (i > 0) { |
| 213 | adjust_latency += -delta; |
| 214 | adj = -delta + adjust_latency/4; |
| 215 | } else |
| 216 | adj = -delta; |
| 217 | |
| 218 | tick_ops->add_tick(adj); |
| 219 | } |
| 220 | #if DEBUG_TICK_SYNC |
| 221 | t[i].rt = rt; |
| 222 | t[i].master = master_time_stamp; |
| 223 | t[i].diff = delta; |
| 224 | t[i].lat = adjust_latency/4; |
| 225 | #endif |
| 226 | } |
| 227 | } |
| 228 | local_irq_restore(flags); |
| 229 | |
| 230 | #if DEBUG_TICK_SYNC |
| 231 | for (i = 0; i < NUM_ROUNDS; i++) |
| 232 | printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", |
| 233 | t[i].rt, t[i].master, t[i].diff, t[i].lat); |
| 234 | #endif |
| 235 | |
| 236 | printk(KERN_INFO "CPU %d: synchronized TICK with master CPU " |
| 237 | "(last diff %ld cycles, maxerr %lu cycles)\n", |
| 238 | smp_processor_id(), delta, rt); |
| 239 | } |
| 240 | |
| 241 | static void smp_start_sync_tick_client(int cpu); |
| 242 | |
| 243 | static void smp_synchronize_one_tick(int cpu) |
| 244 | { |
| 245 | unsigned long flags, i; |
| 246 | |
| 247 | go[MASTER] = 0; |
| 248 | |
| 249 | smp_start_sync_tick_client(cpu); |
| 250 | |
| 251 | /* wait for client to be ready */ |
| 252 | while (!go[MASTER]) |
| 253 | rmb(); |
| 254 | |
| 255 | /* now let the client proceed into its loop */ |
| 256 | go[MASTER] = 0; |
| 257 | membar_safe("#StoreLoad"); |
| 258 | |
| 259 | spin_lock_irqsave(&itc_sync_lock, flags); |
| 260 | { |
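| | /* Answer each of the slave's NUM_ROUNDS * NUM_ITERS handshakes with our current tick value. */ |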
| 261 | for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { |
| 262 | while (!go[MASTER]) |
| 263 | rmb(); |
| 264 | go[MASTER] = 0; |
| 265 | wmb(); |
| 266 | go[SLAVE] = tick_ops->get_tick(); |
| 267 | membar_safe("#StoreLoad"); |
| 268 | } |
| 269 | } |
| 270 | spin_unlock_irqrestore(&itc_sync_lock, flags); |
| 271 | } |
| 272 | |
| 273 | #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) |
| 274 | /* XXX Put this in some common place. XXX */ |
| 275 | static unsigned long kimage_addr_to_ra(void *p) |
| 276 | { |
| 277 | unsigned long val = (unsigned long) p; |
| 278 | |
| 279 | return kern_base + (val - KERNBASE); |
| 280 | } |
| 281 | |
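| | /* Start a cpu under the sun4v hypervisor (LDoms): build an hvtramp_descr |
| | * describing the kernel image's locked TLB mappings, the fault info block, |
| | * and the new cpu's initial thread pointer, then hand the cpu to the |
| | * hv_cpu_startup trampoline via sun4v_cpu_start(). |
| | */ |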
| 282 | static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp) |
| 283 | { |
| 284 | extern unsigned long sparc64_ttable_tl0; |
| 285 | extern unsigned long kern_locked_tte_data; |
| 286 | struct hvtramp_descr *hdesc; |
| 287 | unsigned long trampoline_ra; |
| 288 | struct trap_per_cpu *tb; |
| 289 | u64 tte_vaddr, tte_data; |
| 290 | unsigned long hv_err; |
| 291 | int i; |
| 292 | |
| 293 | hdesc = kzalloc(sizeof(*hdesc) + |
| 294 | (sizeof(struct hvtramp_mapping) * |
| 295 | num_kernel_image_mappings - 1), |
| 296 | GFP_KERNEL); |
| 297 | if (!hdesc) { |
| 298 | printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " |
| 299 | "hvtramp_descr.\n"); |
| 300 | return; |
| 301 | } |
| 302 | *descrp = hdesc; |
| 303 | |
| 304 | hdesc->cpu = cpu; |
| 305 | hdesc->num_mappings = num_kernel_image_mappings; |
| 306 | |
| 307 | tb = &trap_block[cpu]; |
| 308 | |
| 309 | hdesc->fault_info_va = (unsigned long) &tb->fault_info; |
| 310 | hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); |
| 311 | |
| 312 | hdesc->thread_reg = thread_reg; |
| 313 | |
| 314 | tte_vaddr = (unsigned long) KERNBASE; |
| 315 | tte_data = kern_locked_tte_data; |
| 316 | |
| 317 | for (i = 0; i < hdesc->num_mappings; i++) { |
| 318 | hdesc->maps[i].vaddr = tte_vaddr; |
| 319 | hdesc->maps[i].tte = tte_data; |
| 320 | tte_vaddr += 0x400000; |
| 321 | tte_data += 0x400000; |
| 322 | } |
| 323 | |
| 324 | trampoline_ra = kimage_addr_to_ra(hv_cpu_startup); |
| 325 | |
| 326 | hv_err = sun4v_cpu_start(cpu, trampoline_ra, |
| 327 | kimage_addr_to_ra(&sparc64_ttable_tl0), |
| 328 | __pa(hdesc)); |
| 329 | if (hv_err) |
| 330 | printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() " |
| 331 | "gives error %lu\n", hv_err); |
| 332 | } |
| 333 | #endif |
| 334 | |
| 335 | extern unsigned long sparc64_cpu_startup; |
| 336 | |
| 337 | /* The OBP cpu startup callback truncates the 3rd arg cookie to |
| 338 | * 32 bits (I think), so to be safe we have it read the pointer |
| 339 | * contained here; that way we work on >4GB machines. -DaveM |
| 340 | */ |
| 341 | static struct thread_info *cpu_new_thread = NULL; |
| 342 | |
| 343 | static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) |
| 344 | { |
| 345 | unsigned long entry = |
| 346 | (unsigned long)(&sparc64_cpu_startup); |
| 347 | unsigned long cookie = |
| 348 | (unsigned long)(&cpu_new_thread); |
| 349 | void *descr = NULL; |
| 350 | int timeout, ret; |
| 351 | |
| 352 | callin_flag = 0; |
| 353 | cpu_new_thread = task_thread_info(idle); |
| 354 | |
| 355 | if (tlb_type == hypervisor) { |
| 356 | #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) |
| 357 | if (ldom_domaining_enabled) |
| 358 | ldom_startcpu_cpuid(cpu, |
| 359 | (unsigned long) cpu_new_thread, |
| 360 | &descr); |
| 361 | else |
| 362 | #endif |
| 363 | prom_startcpu_cpuid(cpu, entry, cookie); |
| 364 | } else { |
| 365 | struct device_node *dp = of_find_node_by_cpuid(cpu); |
| 366 | |
| 367 | prom_startcpu(dp->phandle, entry, cookie); |
| 368 | } |
| 369 | |
| 370 | for (timeout = 0; timeout < 50000; timeout++) { |
| 371 | if (callin_flag) |
| 372 | break; |
| 373 | udelay(100); |
| 374 | } |
| 375 | |
| 376 | if (callin_flag) { |
| 377 | ret = 0; |
| 378 | } else { |
| 379 | printk("Processor %d is stuck.\n", cpu); |
| 380 | ret = -ENODEV; |
| 381 | } |
| 382 | cpu_new_thread = NULL; |
| 383 | |
| 384 | kfree(descr); |
| 385 | |
| 386 | return ret; |
| 387 | } |
| 388 | |
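| | /* Deliver one mondo (data0/data1/data2) to a single cpu by writing it |
| | * into the outgoing interrupt dispatch registers and then polling the |
| | * dispatch status register; NACKed sends are retried after a short delay. |
| | */ |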
| 389 | static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu) |
| 390 | { |
| 391 | u64 result, target; |
| 392 | int stuck, tmp; |
| 393 | |
| 394 | if (this_is_starfire) { |
| 395 | /* map to real upaid */ |
| 396 | cpu = (((cpu & 0x3c) << 1) | |
| 397 | ((cpu & 0x40) >> 4) | |
| 398 | (cpu & 0x3)); |
| 399 | } |
| 400 | |
| 401 | target = (cpu << 14) | 0x70; |
| 402 | again: |
| 403 | /* Ok, this is the real Spitfire Errata #54. |
| 404 | * One must read back from a UDB internal register |
| 405 | * after writes to the UDB interrupt dispatch, but |
| 406 | * before the membar Sync for that write. |
| 407 | * So we use the high UDB control register (ASI 0x7f, |
| 408 | * ADDR 0x20) for the dummy read. -DaveM |
| 409 | */ |
| 410 | tmp = 0x40; |
| 411 | __asm__ __volatile__( |
| 412 | "wrpr %1, %2, %%pstate\n\t" |
| 413 | "stxa %4, [%0] %3\n\t" |
| 414 | "stxa %5, [%0+%8] %3\n\t" |
| 415 | "add %0, %8, %0\n\t" |
| 416 | "stxa %6, [%0+%8] %3\n\t" |
| 417 | "membar #Sync\n\t" |
| 418 | "stxa %%g0, [%7] %3\n\t" |
| 419 | "membar #Sync\n\t" |
| 420 | "mov 0x20, %%g1\n\t" |
| 421 | "ldxa [%%g1] 0x7f, %%g0\n\t" |
| 422 | "membar #Sync" |
| 423 | : "=r" (tmp) |
| 424 | : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W), |
| 425 | "r" (data0), "r" (data1), "r" (data2), "r" (target), |
| 426 | "r" (0x10), "0" (tmp) |
| 427 | : "g1"); |
| 428 | |
| 429 | /* NOTE: PSTATE_IE is still clear. */ |
| 430 | stuck = 100000; |
| 431 | do { |
| 432 | __asm__ __volatile__("ldxa [%%g0] %1, %0" |
| 433 | : "=r" (result) |
| 434 | : "i" (ASI_INTR_DISPATCH_STAT)); |
| 435 | if (result == 0) { |
| 436 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" |
| 437 | : : "r" (pstate)); |
| 438 | return; |
| 439 | } |
| 440 | stuck -= 1; |
| 441 | if (stuck == 0) |
| 442 | break; |
| 443 | } while (result & 0x1); |
| 444 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" |
| 445 | : : "r" (pstate)); |
| 446 | if (stuck == 0) { |
| 447 | printk("CPU[%d]: mondo stuckage result[%016llx]\n", |
| 448 | smp_processor_id(), result); |
| 449 | } else { |
| 450 | udelay(2); |
| 451 | goto again; |
| 452 | } |
| 453 | } |
| 454 | |
| 455 | static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt) |
| 456 | { |
| 457 | u64 *mondo, data0, data1, data2; |
| 458 | u16 *cpu_list; |
| 459 | u64 pstate; |
| 460 | int i; |
| 461 | |
| 462 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); |
| 463 | cpu_list = __va(tb->cpu_list_pa); |
| 464 | mondo = __va(tb->cpu_mondo_block_pa); |
| 465 | data0 = mondo[0]; |
| 466 | data1 = mondo[1]; |
| 467 | data2 = mondo[2]; |
| 468 | for (i = 0; i < cnt; i++) |
| 469 | spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]); |
| 470 | } |
| 471 | |
| 472 | /* Cheetah now allows sending the whole 64 bytes of data in the interrupt |
| 473 | * packet, but we have no use for that. However, we do take advantage of |
| 474 | * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously). |
| 475 | */ |
| 476 | static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt) |
| 477 | { |
| 478 | int nack_busy_id, is_jbus, need_more; |
| 479 | u64 *mondo, pstate, ver, busy_mask; |
| 480 | u16 *cpu_list; |
| 481 | |
| 482 | cpu_list = __va(tb->cpu_list_pa); |
| 483 | mondo = __va(tb->cpu_mondo_block_pa); |
| 484 | |
| 485 | /* Unfortunately, someone at Sun had the brilliant idea to make the |
| 486 | * busy/nack fields hard-coded by ITID number for this Ultra-III |
| 487 | * derivative processor. |
| 488 | */ |
| 489 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); |
| 490 | is_jbus = ((ver >> 32) == __JALAPENO_ID || |
| 491 | (ver >> 32) == __SERRANO_ID); |
| 492 | |
| 493 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); |
| 494 | |
| 495 | retry: |
| 496 | need_more = 0; |
| 497 | __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" |
| 498 | : : "r" (pstate), "i" (PSTATE_IE)); |
| 499 | |
| 500 | /* Setup the dispatch data registers. */ |
| 501 | __asm__ __volatile__("stxa %0, [%3] %6\n\t" |
| 502 | "stxa %1, [%4] %6\n\t" |
| 503 | "stxa %2, [%5] %6\n\t" |
| 504 | "membar #Sync\n\t" |
| 505 | : /* no outputs */ |
| 506 | : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]), |
| 507 | "r" (0x40), "r" (0x50), "r" (0x60), |
| 508 | "i" (ASI_INTR_W)); |
| 509 | |
| 510 | nack_busy_id = 0; |
| 511 | busy_mask = 0; |
| 512 | { |
| 513 | int i; |
| 514 | |
| 515 | for (i = 0; i < cnt; i++) { |
| 516 | u64 target, nr; |
| 517 | |
| 518 | nr = cpu_list[i]; |
| 519 | if (nr == 0xffff) |
| 520 | continue; |
| 521 | |
| 522 | target = (nr << 14) | 0x70; |
| 523 | if (is_jbus) { |
| 524 | busy_mask |= (0x1UL << (nr * 2)); |
| 525 | } else { |
| 526 | target |= (nack_busy_id << 24); |
| 527 | busy_mask |= (0x1UL << |
| 528 | (nack_busy_id * 2)); |
| 529 | } |
| 530 | __asm__ __volatile__( |
| 531 | "stxa %%g0, [%0] %1\n\t" |
| 532 | "membar #Sync\n\t" |
| 533 | : /* no outputs */ |
| 534 | : "r" (target), "i" (ASI_INTR_W)); |
| 535 | nack_busy_id++; |
| 536 | if (nack_busy_id == 32) { |
| 537 | need_more = 1; |
| 538 | break; |
| 539 | } |
| 540 | } |
| 541 | } |
| 542 | |
| 543 | /* Now, poll for completion. */ |
| 544 | { |
| 545 | u64 dispatch_stat, nack_mask; |
| 546 | long stuck; |
| 547 | |
| 548 | stuck = 100000 * nack_busy_id; |
| 549 | nack_mask = busy_mask << 1; |
| 550 | do { |
| 551 | __asm__ __volatile__("ldxa [%%g0] %1, %0" |
| 552 | : "=r" (dispatch_stat) |
| 553 | : "i" (ASI_INTR_DISPATCH_STAT)); |
| 554 | if (!(dispatch_stat & (busy_mask | nack_mask))) { |
| 555 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" |
| 556 | : : "r" (pstate)); |
| 557 | if (unlikely(need_more)) { |
| 558 | int i, this_cnt = 0; |
| 559 | for (i = 0; i < cnt; i++) { |
| 560 | if (cpu_list[i] == 0xffff) |
| 561 | continue; |
| 562 | cpu_list[i] = 0xffff; |
| 563 | this_cnt++; |
| 564 | if (this_cnt == 32) |
| 565 | break; |
| 566 | } |
| 567 | goto retry; |
| 568 | } |
| 569 | return; |
| 570 | } |
| 571 | if (!--stuck) |
| 572 | break; |
| 573 | } while (dispatch_stat & busy_mask); |
| 574 | |
| 575 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" |
| 576 | : : "r" (pstate)); |
| 577 | |
| 578 | if (dispatch_stat & busy_mask) { |
| 579 | /* Busy bits will not clear, continue instead |
| 580 | * of freezing up on this cpu. |
| 581 | */ |
| 582 | printk("CPU[%d]: mondo stuckage result[%016llx]\n", |
| 583 | smp_processor_id(), dispatch_stat); |
| 584 | } else { |
| 585 | int i, this_busy_nack = 0; |
| 586 | |
| 587 | /* Delay some random time with interrupts enabled |
| 588 | * to prevent deadlock. |
| 589 | */ |
| 590 | udelay(2 * nack_busy_id); |
| 591 | |
| 592 | /* Clear out the mask bits for cpus which did not |
| 593 | * NACK us. |
| 594 | */ |
| 595 | for (i = 0; i < cnt; i++) { |
| 596 | u64 check_mask, nr; |
| 597 | |
| 598 | nr = cpu_list[i]; |
| 599 | if (nr == 0xffff) |
| 600 | continue; |
| 601 | |
| 602 | if (is_jbus) |
| 603 | check_mask = (0x2UL << (2*nr)); |
| 604 | else |
| 605 | check_mask = (0x2UL << |
| 606 | this_busy_nack); |
| 607 | if ((dispatch_stat & check_mask) == 0) |
| 608 | cpu_list[i] = 0xffff; |
| 609 | this_busy_nack += 2; |
| 610 | if (this_busy_nack == 64) |
| 611 | break; |
| 612 | } |
| 613 | |
| 614 | goto retry; |
| 615 | } |
| 616 | } |
| 617 | } |
| 618 | |
| 619 | /* Multi-cpu list version. */ |
| 620 | static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) |
| 621 | { |
| 622 | int retries, this_cpu, prev_sent, i, saw_cpu_error; |
| 623 | unsigned long status; |
| 624 | u16 *cpu_list; |
| 625 | |
| 626 | this_cpu = smp_processor_id(); |
| 627 | |
| 628 | cpu_list = __va(tb->cpu_list_pa); |
| 629 | |
| 630 | saw_cpu_error = 0; |
| 631 | retries = 0; |
| 632 | prev_sent = 0; |
| 633 | do { |
| 634 | int forward_progress, n_sent; |
| 635 | |
| 636 | status = sun4v_cpu_mondo_send(cnt, |
| 637 | tb->cpu_list_pa, |
| 638 | tb->cpu_mondo_block_pa); |
| 639 | |
| 640 | /* HV_EOK means all cpus received the xcall, we're done. */ |
| 641 | if (likely(status == HV_EOK)) |
| 642 | break; |
| 643 | |
| 644 | /* First, see if we made any forward progress. |
| 645 | * |
| 646 | * The hypervisor indicates successful sends by setting |
| 647 | * cpu list entries to the value 0xffff. |
| 648 | */ |
| 649 | n_sent = 0; |
| 650 | for (i = 0; i < cnt; i++) { |
| 651 | if (likely(cpu_list[i] == 0xffff)) |
| 652 | n_sent++; |
| 653 | } |
| 654 | |
| 655 | forward_progress = 0; |
| 656 | if (n_sent > prev_sent) |
| 657 | forward_progress = 1; |
| 658 | |
| 659 | prev_sent = n_sent; |
| 660 | |
| 661 | /* If we get a HV_ECPUERROR, then one or more of the cpus |
| 662 | * in the list are in error state. Use the cpu_state() |
| 663 | * hypervisor call to find out which cpus are in error state. |
| 664 | */ |
| 665 | if (unlikely(status == HV_ECPUERROR)) { |
| 666 | for (i = 0; i < cnt; i++) { |
| 667 | long err; |
| 668 | u16 cpu; |
| 669 | |
| 670 | cpu = cpu_list[i]; |
| 671 | if (cpu == 0xffff) |
| 672 | continue; |
| 673 | |
| 674 | err = sun4v_cpu_state(cpu); |
| 675 | if (err == HV_CPU_STATE_ERROR) { |
| 676 | saw_cpu_error = (cpu + 1); |
| 677 | cpu_list[i] = 0xffff; |
| 678 | } |
| 679 | } |
| 680 | } else if (unlikely(status != HV_EWOULDBLOCK)) |
| 681 | goto fatal_mondo_error; |
| 682 | |
| 683 | /* Don't bother rewriting the CPU list, just leave the |
| 684 | * 0xffff and non-0xffff entries in there and the |
| 685 | * hypervisor will do the right thing. |
| 686 | * |
| 687 | * Only advance timeout state if we didn't make any |
| 688 | * forward progress. |
| 689 | */ |
| 690 | if (unlikely(!forward_progress)) { |
| 691 | if (unlikely(++retries > 10000)) |
| 692 | goto fatal_mondo_timeout; |
| 693 | |
| 694 | /* Delay a little bit to let other cpus catch up |
| 695 | * on their cpu mondo queue work. |
| 696 | */ |
| 697 | udelay(2 * cnt); |
| 698 | } |
| 699 | } while (1); |
| 700 | |
| 701 | if (unlikely(saw_cpu_error)) |
| 702 | goto fatal_mondo_cpu_error; |
| 703 | |
| 704 | return; |
| 705 | |
| 706 | fatal_mondo_cpu_error: |
| 707 | printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " |
| 708 | "(including %d) were in error state\n", |
| 709 | this_cpu, saw_cpu_error - 1); |
| 710 | return; |
| 711 | |
| 712 | fatal_mondo_timeout: |
| 713 | printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " |
| 714 | " progress after %d retries.\n", |
| 715 | this_cpu, retries); |
| 716 | goto dump_cpu_list_and_out; |
| 717 | |
| 718 | fatal_mondo_error: |
| 719 | printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", |
| 720 | this_cpu, status); |
| 721 | printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " |
| 722 | "mondo_block_pa(%lx)\n", |
| 723 | this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); |
| 724 | |
| 725 | dump_cpu_list_and_out: |
| 726 | printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); |
| 727 | for (i = 0; i < cnt; i++) |
| 728 | printk("%u ", cpu_list[i]); |
| 729 | printk("]\n"); |
| 730 | } |
| 731 | |
| 732 | static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); |
| 733 | |
| 734 | static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask) |
| 735 | { |
| 736 | struct trap_per_cpu *tb; |
| 737 | int this_cpu, i, cnt; |
| 738 | unsigned long flags; |
| 739 | u16 *cpu_list; |
| 740 | u64 *mondo; |
| 741 | |
| 742 | /* We have to do this whole thing with interrupts fully disabled. |
| 743 | * Otherwise if we send an xcall from interrupt context it will |
| 744 | * corrupt both our mondo block and cpu list state. |
| 745 | * |
| 746 | * One consequence of this is that we cannot use timeout mechanisms |
| 747 | * that depend upon interrupts being delivered locally. So, for |
| 748 | * example, we cannot sample jiffies and expect it to advance. |
| 749 | * |
| 750 | * Fortunately, udelay() uses %stick/%tick so we can use that. |
| 751 | */ |
| 752 | local_irq_save(flags); |
| 753 | |
| 754 | this_cpu = smp_processor_id(); |
| 755 | tb = &trap_block[this_cpu]; |
| 756 | |
| 757 | mondo = __va(tb->cpu_mondo_block_pa); |
| 758 | mondo[0] = data0; |
| 759 | mondo[1] = data1; |
| 760 | mondo[2] = data2; |
| 761 | wmb(); |
| 762 | |
| 763 | cpu_list = __va(tb->cpu_list_pa); |
| 764 | |
| 765 | /* Setup the initial cpu list. */ |
| 766 | cnt = 0; |
| 767 | for_each_cpu(i, mask) { |
| 768 | if (i == this_cpu || !cpu_online(i)) |
| 769 | continue; |
| 770 | cpu_list[cnt++] = i; |
| 771 | } |
| 772 | |
| 773 | if (cnt) |
| 774 | xcall_deliver_impl(tb, cnt); |
| 775 | |
| 776 | local_irq_restore(flags); |
| 777 | } |
| 778 | |
| 779 | /* Send cross call to all processors mentioned in MASK |
| 780 | * except self. Really, there are only two cases currently, |
| 781 | * "cpu_online_mask" and "mm_cpumask(mm)". |
| 782 | */ |
| 783 | static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask) |
| 784 | { |
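| | /* data0 carries the MMU context in its upper 32 bits and the low |
| | * 32 bits of the xcall handler's address in its lower half. |
| | */ |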
| 785 | u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff)); |
| 786 | |
| 787 | xcall_deliver(data0, data1, data2, mask); |
| 788 | } |
| 789 | |
| 790 | /* Send cross call to all processors except self. */ |
| 791 | static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2) |
| 792 | { |
| 793 | smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask); |
| 794 | } |
| 795 | |
| 796 | extern unsigned long xcall_sync_tick; |
| 797 | |
| 798 | static void smp_start_sync_tick_client(int cpu) |
| 799 | { |
| 800 | xcall_deliver((u64) &xcall_sync_tick, 0, 0, |
| 801 | cpumask_of(cpu)); |
| 802 | } |
| 803 | |
| 804 | extern unsigned long xcall_call_function; |
| 805 | |
| 806 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
| 807 | { |
| 808 | xcall_deliver((u64) &xcall_call_function, 0, 0, mask); |
| 809 | } |
| 810 | |
| 811 | extern unsigned long xcall_call_function_single; |
| 812 | |
| 813 | void arch_send_call_function_single_ipi(int cpu) |
| 814 | { |
| 815 | xcall_deliver((u64) &xcall_call_function_single, 0, 0, |
| 816 | cpumask_of(cpu)); |
| 817 | } |
| 818 | |
| 819 | void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) |
| 820 | { |
| 821 | clear_softint(1 << irq); |
| 822 | generic_smp_call_function_interrupt(); |
| 823 | } |
| 824 | |
| 825 | void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) |
| 826 | { |
| 827 | clear_softint(1 << irq); |
| 828 | generic_smp_call_function_single_interrupt(); |
| 829 | } |
| 830 | |
| 831 | static void tsb_sync(void *info) |
| 832 | { |
| 833 | struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; |
| 834 | struct mm_struct *mm = info; |
| 835 | |
| 836 | /* It is not valid to test "current->active_mm == mm" here. |
| 837 | * |
| 838 | * The value of "current" is not changed atomically with |
| 839 | * switch_mm(). But that's OK, we just need to check the |
| 840 | * current cpu's trap block PGD physical address. |
| 841 | */ |
| 842 | if (tp->pgd_paddr == __pa(mm->pgd)) |
| 843 | tsb_context_switch(mm); |
| 844 | } |
| 845 | |
| 846 | void smp_tsb_sync(struct mm_struct *mm) |
| 847 | { |
| 848 | smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1); |
| 849 | } |
| 850 | |
| 851 | extern unsigned long xcall_flush_tlb_mm; |
| 852 | extern unsigned long xcall_flush_tlb_pending; |
| 853 | extern unsigned long xcall_flush_tlb_kernel_range; |
| 854 | extern unsigned long xcall_fetch_glob_regs; |
| 855 | extern unsigned long xcall_fetch_glob_pmu; |
| 856 | extern unsigned long xcall_fetch_glob_pmu_n4; |
| 857 | extern unsigned long xcall_receive_signal; |
| 858 | extern unsigned long xcall_new_mmu_context_version; |
| 859 | #ifdef CONFIG_KGDB |
| 860 | extern unsigned long xcall_kgdb_capture; |
| 861 | #endif |
| 862 | |
| 863 | #ifdef DCACHE_ALIASING_POSSIBLE |
| 864 | extern unsigned long xcall_flush_dcache_page_cheetah; |
| 865 | #endif |
| 866 | extern unsigned long xcall_flush_dcache_page_spitfire; |
| 867 | |
| 868 | #ifdef CONFIG_DEBUG_DCFLUSH |
| 869 | extern atomic_t dcpage_flushes; |
| 870 | extern atomic_t dcpage_flushes_xcall; |
| 871 | #endif |
| 872 | |
| 873 | static inline void __local_flush_dcache_page(struct page *page) |
| 874 | { |
| 875 | #ifdef DCACHE_ALIASING_POSSIBLE |
| 876 | __flush_dcache_page(page_address(page), |
| 877 | ((tlb_type == spitfire) && |
| 878 | page_mapping(page) != NULL)); |
| 879 | #else |
| 880 | if (page_mapping(page) != NULL && |
| 881 | tlb_type == spitfire) |
| 882 | __flush_icache_page(__pa(page_address(page))); |
| 883 | #endif |
| 884 | } |
| 885 | |
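| | /* Flush one page from the D-cache on a specific cpu: do it locally if |
| | * that cpu is us, otherwise cross-call the cpu-type-specific flush |
| | * handler. Nothing to do on sun4v (tlb_type == hypervisor). |
| | */ |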
| 886 | void smp_flush_dcache_page_impl(struct page *page, int cpu) |
| 887 | { |
| 888 | int this_cpu; |
| 889 | |
| 890 | if (tlb_type == hypervisor) |
| 891 | return; |
| 892 | |
| 893 | #ifdef CONFIG_DEBUG_DCFLUSH |
| 894 | atomic_inc(&dcpage_flushes); |
| 895 | #endif |
| 896 | |
| 897 | this_cpu = get_cpu(); |
| 898 | |
| 899 | if (cpu == this_cpu) { |
| 900 | __local_flush_dcache_page(page); |
| 901 | } else if (cpu_online(cpu)) { |
| 902 | void *pg_addr = page_address(page); |
| 903 | u64 data0 = 0; |
| 904 | |
| 905 | if (tlb_type == spitfire) { |
| 906 | data0 = ((u64)&xcall_flush_dcache_page_spitfire); |
| 907 | if (page_mapping(page) != NULL) |
| 908 | data0 |= ((u64)1 << 32); |
| 909 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
| 910 | #ifdef DCACHE_ALIASING_POSSIBLE |
| 911 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); |
| 912 | #endif |
| 913 | } |
| 914 | if (data0) { |
| 915 | xcall_deliver(data0, __pa(pg_addr), |
| 916 | (u64) pg_addr, cpumask_of(cpu)); |
| 917 | #ifdef CONFIG_DEBUG_DCFLUSH |
| 918 | atomic_inc(&dcpage_flushes_xcall); |
| 919 | #endif |
| 920 | } |
| 921 | } |
| 922 | |
| 923 | put_cpu(); |
| 924 | } |
| 925 | |
| 926 | void flush_dcache_page_all(struct mm_struct *mm, struct page *page) |
| 927 | { |
| 928 | void *pg_addr; |
| 929 | u64 data0; |
| 930 | |
| 931 | if (tlb_type == hypervisor) |
| 932 | return; |
| 933 | |
| 934 | preempt_disable(); |
| 935 | |
| 936 | #ifdef CONFIG_DEBUG_DCFLUSH |
| 937 | atomic_inc(&dcpage_flushes); |
| 938 | #endif |
| 939 | data0 = 0; |
| 940 | pg_addr = page_address(page); |
| 941 | if (tlb_type == spitfire) { |
| 942 | data0 = ((u64)&xcall_flush_dcache_page_spitfire); |
| 943 | if (page_mapping(page) != NULL) |
| 944 | data0 |= ((u64)1 << 32); |
| 945 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
| 946 | #ifdef DCACHE_ALIASING_POSSIBLE |
| 947 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); |
| 948 | #endif |
| 949 | } |
| 950 | if (data0) { |
| 951 | xcall_deliver(data0, __pa(pg_addr), |
| 952 | (u64) pg_addr, cpu_online_mask); |
| 953 | #ifdef CONFIG_DEBUG_DCFLUSH |
| 954 | atomic_inc(&dcpage_flushes_xcall); |
| 955 | #endif |
| 956 | } |
| 957 | __local_flush_dcache_page(page); |
| 958 | |
| 959 | preempt_enable(); |
| 960 | } |
| 961 | |
| 962 | void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) |
| 963 | { |
| 964 | struct mm_struct *mm; |
| 965 | unsigned long flags; |
| 966 | |
| 967 | clear_softint(1 << irq); |
| 968 | |
| 969 | /* See if we need to allocate a new TLB context because |
| 970 | * the version of the one we are using is now out of date. |
| 971 | */ |
| 972 | mm = current->active_mm; |
| 973 | if (unlikely(!mm || (mm == &init_mm))) |
| 974 | return; |
| 975 | |
| 976 | spin_lock_irqsave(&mm->context.lock, flags); |
| 977 | |
| 978 | if (unlikely(!CTX_VALID(mm->context))) |
| 979 | get_new_mmu_context(mm); |
| 980 | |
| 981 | spin_unlock_irqrestore(&mm->context.lock, flags); |
| 982 | |
| 983 | load_secondary_context(mm); |
| 984 | __flush_tlb_mm(CTX_HWBITS(mm->context), |
| 985 | SECONDARY_CONTEXT); |
| 986 | } |
| 987 | |
| 988 | void smp_new_mmu_context_version(void) |
| 989 | { |
| 990 | smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); |
| 991 | } |
| 992 | |
| 993 | #ifdef CONFIG_KGDB |
| 994 | void kgdb_roundup_cpus(unsigned long flags) |
| 995 | { |
| 996 | smp_cross_call(&xcall_kgdb_capture, 0, 0, 0); |
| 997 | } |
| 998 | #endif |
| 999 | |
| 1000 | void smp_fetch_global_regs(void) |
| 1001 | { |
| 1002 | smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); |
| 1003 | } |
| 1004 | |
| 1005 | void smp_fetch_global_pmu(void) |
| 1006 | { |
| 1007 | if (tlb_type == hypervisor && |
| 1008 | sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) |
| 1009 | smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0); |
| 1010 | else |
| 1011 | smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0); |
| 1012 | } |
| 1013 | |
| 1014 | /* We know that the window frames of the user have been flushed |
| 1015 | * to the stack before we get here because all callers of us |
| 1016 | * are flush_tlb_*() routines, and these run after flush_cache_*() |
| 1017 | * which performs the flushw. |
| 1018 | * |
| 1019 | * The SMP TLB coherency scheme we use works as follows: |
| 1020 | * |
| 1021 | * 1) mm->cpu_vm_mask is a bit mask of which cpus an address |
| 1022 | * space has (potentially) executed on, this is the heuristic |
| 1023 | * we use to avoid doing cross calls. |
| 1024 | * |
| 1025 | * Also, for flushing from kswapd and also for clones, we |
| 1026 | * use cpu_vm_mask as the list of cpus on which to run the TLB flush. |
| 1027 | * |
| 1028 | * 2) TLB context numbers are shared globally across all processors |
| 1029 | * in the system, this allows us to play several games to avoid |
| 1030 | * cross calls. |
| 1031 | * |
| 1032 | * One invariant is that when a cpu switches to a process, and |
| 1033 | * that process's tsk->active_mm->cpu_vm_mask does not have the |
| 1034 | * current cpu's bit set, that tlb context is flushed locally. |
| 1035 | * |
| 1036 | * If the address space is non-shared (i.e. mm->mm_users == 1) we avoid |
| 1037 | * cross calls when we want to flush the currently running process's |
| 1038 | * tlb state. This is done by clearing all cpu bits except the current |
| 1039 | * processor's in current->mm->cpu_vm_mask and performing the |
| 1040 | * flush locally only. This will force any subsequent cpus which run |
| 1041 | * this task to flush the context from the local tlb if the process |
| 1042 | * migrates to another cpu (again). |
| 1043 | * |
| 1044 | * 3) For shared address spaces (threads) and swapping we bite the |
| 1045 | * bullet for most cases and perform the cross call (but only to |
| 1046 | * the cpus listed in cpu_vm_mask). |
| 1047 | * |
| 1048 | * The performance gain from "optimizing" away the cross call for threads is |
| 1049 | * questionable (in theory the big win for threads is the massive sharing of |
| 1050 | * address space state across processors). |
| 1051 | */ |
| 1052 | |
| 1053 | /* This currently is only used by the hugetlb arch pre-fault |
| 1054 | * hook on UltraSPARC-III+ and later when changing the pagesize |
| 1055 | * bits of the context register for an address space. |
| 1056 | */ |
| 1057 | void smp_flush_tlb_mm(struct mm_struct *mm) |
| 1058 | { |
| 1059 | u32 ctx = CTX_HWBITS(mm->context); |
| 1060 | int cpu = get_cpu(); |
| 1061 | |
| 1062 | if (atomic_read(&mm->mm_users) == 1) { |
| 1063 | cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); |
| 1064 | goto local_flush_and_out; |
| 1065 | } |
| 1066 | |
| 1067 | smp_cross_call_masked(&xcall_flush_tlb_mm, |
| 1068 | ctx, 0, 0, |
| 1069 | mm_cpumask(mm)); |
| 1070 | |
| 1071 | local_flush_and_out: |
| 1072 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); |
| 1073 | |
| 1074 | put_cpu(); |
| 1075 | } |
| 1076 | |
| 1077 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) |
| 1078 | { |
| 1079 | u32 ctx = CTX_HWBITS(mm->context); |
| 1080 | int cpu = get_cpu(); |
| 1081 | |
| 1082 | if (mm == current->mm && atomic_read(&mm->mm_users) == 1) |
| 1083 | cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); |
| 1084 | else |
| 1085 | smp_cross_call_masked(&xcall_flush_tlb_pending, |
| 1086 | ctx, nr, (unsigned long) vaddrs, |
| 1087 | mm_cpumask(mm)); |
| 1088 | |
| 1089 | __flush_tlb_pending(ctx, nr, vaddrs); |
| 1090 | |
| 1091 | put_cpu(); |
| 1092 | } |
| 1093 | |
| 1094 | void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) |
| 1095 | { |
| 1096 | start &= PAGE_MASK; |
| 1097 | end = PAGE_ALIGN(end); |
| 1098 | if (start != end) { |
| 1099 | smp_cross_call(&xcall_flush_tlb_kernel_range, |
| 1100 | 0, start, end); |
| 1101 | |
| 1102 | __flush_tlb_kernel_range(start, end); |
| 1103 | } |
| 1104 | } |
| 1105 | |
| 1106 | /* CPU capture. */ |
| 1107 | /* #define CAPTURE_DEBUG */ |
| 1108 | extern unsigned long xcall_capture; |
| 1109 | |
| 1110 | static atomic_t smp_capture_depth = ATOMIC_INIT(0); |
| 1111 | static atomic_t smp_capture_registry = ATOMIC_INIT(0); |
| 1112 | static unsigned long penguins_are_doing_time; |
| 1113 | |
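| | /* Pull every other online cpu into smp_penguin_jailcell(), where it |
| | * spins (in the PROM world) until smp_release() lets it go. Nested |
| | * captures are counted via smp_capture_depth. |
| | */ |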
| 1114 | void smp_capture(void) |
| 1115 | { |
| 1116 | int result = atomic_add_ret(1, &smp_capture_depth); |
| 1117 | |
| 1118 | if (result == 1) { |
| 1119 | int ncpus = num_online_cpus(); |
| 1120 | |
| 1121 | #ifdef CAPTURE_DEBUG |
| 1122 | printk("CPU[%d]: Sending penguins to jail...", |
| 1123 | smp_processor_id()); |
| 1124 | #endif |
| 1125 | penguins_are_doing_time = 1; |
| 1126 | atomic_inc(&smp_capture_registry); |
| 1127 | smp_cross_call(&xcall_capture, 0, 0, 0); |
| 1128 | while (atomic_read(&smp_capture_registry) != ncpus) |
| 1129 | rmb(); |
| 1130 | #ifdef CAPTURE_DEBUG |
| 1131 | printk("done\n"); |
| 1132 | #endif |
| 1133 | } |
| 1134 | } |
| 1135 | |
| 1136 | void smp_release(void) |
| 1137 | { |
| 1138 | if (atomic_dec_and_test(&smp_capture_depth)) { |
| 1139 | #ifdef CAPTURE_DEBUG |
| 1140 | printk("CPU[%d]: Giving pardon to " |
| 1141 | "imprisoned penguins\n", |
| 1142 | smp_processor_id()); |
| 1143 | #endif |
| 1144 | penguins_are_doing_time = 0; |
| 1145 | membar_safe("#StoreLoad"); |
| 1146 | atomic_dec(&smp_capture_registry); |
| 1147 | } |
| 1148 | } |
| 1149 | |
| 1150 | /* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE |
| 1151 | * set, so they can service tlb flush xcalls... |
| 1152 | */ |
| 1153 | extern void prom_world(int); |
| 1154 | |
| 1155 | void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs) |
| 1156 | { |
| 1157 | clear_softint(1 << irq); |
| 1158 | |
| 1159 | preempt_disable(); |
| 1160 | |
| 1161 | __asm__ __volatile__("flushw"); |
| 1162 | prom_world(1); |
| 1163 | atomic_inc(&smp_capture_registry); |
| 1164 | membar_safe("#StoreLoad"); |
| 1165 | while (penguins_are_doing_time) |
| 1166 | rmb(); |
| 1167 | atomic_dec(&smp_capture_registry); |
| 1168 | prom_world(0); |
| 1169 | |
| 1170 | preempt_enable(); |
| 1171 | } |
| 1172 | |
| 1173 | /* /proc/profile writes can call this, don't __init it please. */ |
| 1174 | int setup_profiling_timer(unsigned int multiplier) |
| 1175 | { |
| 1176 | return -EINVAL; |
| 1177 | } |
| 1178 | |
| 1179 | void __init smp_prepare_cpus(unsigned int max_cpus) |
| 1180 | { |
| 1181 | } |
| 1182 | |
| 1183 | void __devinit smp_prepare_boot_cpu(void) |
| 1184 | { |
| 1185 | } |
| 1186 | |
| 1187 | void __init smp_setup_processor_id(void) |
| 1188 | { |
| 1189 | if (tlb_type == spitfire) |
| 1190 | xcall_deliver_impl = spitfire_xcall_deliver; |
| 1191 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) |
| 1192 | xcall_deliver_impl = cheetah_xcall_deliver; |
| 1193 | else |
| 1194 | xcall_deliver_impl = hypervisor_xcall_deliver; |
| 1195 | } |
| 1196 | |
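| | /* Group cpus that report the same core_id into cpu_core_map and cpus |
| | * that report the same proc_id into cpu_sibling_map; cpus with no |
| | * topology info only list themselves. |
| | */ |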
| 1197 | void __devinit smp_fill_in_sib_core_maps(void) |
| 1198 | { |
| 1199 | unsigned int i; |
| 1200 | |
| 1201 | for_each_present_cpu(i) { |
| 1202 | unsigned int j; |
| 1203 | |
| 1204 | cpumask_clear(&cpu_core_map[i]); |
| 1205 | if (cpu_data(i).core_id == 0) { |
| 1206 | cpumask_set_cpu(i, &cpu_core_map[i]); |
| 1207 | continue; |
| 1208 | } |
| 1209 | |
| 1210 | for_each_present_cpu(j) { |
| 1211 | if (cpu_data(i).core_id == |
| 1212 | cpu_data(j).core_id) |
| 1213 | cpumask_set_cpu(j, &cpu_core_map[i]); |
| 1214 | } |
| 1215 | } |
| 1216 | |
| 1217 | for_each_present_cpu(i) { |
| 1218 | unsigned int j; |
| 1219 | |
| 1220 | cpumask_clear(&per_cpu(cpu_sibling_map, i)); |
| 1221 | if (cpu_data(i).proc_id == -1) { |
| 1222 | cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i)); |
| 1223 | continue; |
| 1224 | } |
| 1225 | |
| 1226 | for_each_present_cpu(j) { |
| 1227 | if (cpu_data(i).proc_id == |
| 1228 | cpu_data(j).proc_id) |
| 1229 | cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i)); |
| 1230 | } |
| 1231 | } |
| 1232 | } |
| 1233 | |
| 1234 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) |
| 1235 | { |
| 1236 | int ret = smp_boot_one_cpu(cpu, tidle); |
| 1237 | |
| 1238 | if (!ret) { |
| 1239 | cpumask_set_cpu(cpu, &smp_commenced_mask); |
| 1240 | while (!cpu_online(cpu)) |
| 1241 | mb(); |
| 1242 | if (!cpu_online(cpu)) { |
| 1243 | ret = -ENODEV; |
| 1244 | } else { |
| 1245 | /* On SUN4V, writes to %tick and %stick are |
| 1246 | * not allowed. |
| 1247 | */ |
| 1248 | if (tlb_type != hypervisor) |
| 1249 | smp_synchronize_one_tick(cpu); |
| 1250 | } |
| 1251 | } |
| 1252 | return ret; |
| 1253 | } |
| 1254 | |
| 1255 | #ifdef CONFIG_HOTPLUG_CPU |
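| | /* Final code run by a cpu going offline: unconfigure its sun4v mondo |
| | * queues (if any), drop out of smp_commenced_mask, and spin with |
| | * interrupts disabled until __cpu_die() stops it or gives up. |
| | */ |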
| 1256 | void cpu_play_dead(void) |
| 1257 | { |
| 1258 | int cpu = smp_processor_id(); |
| 1259 | unsigned long pstate; |
| 1260 | |
| 1261 | idle_task_exit(); |
| 1262 | |
| 1263 | if (tlb_type == hypervisor) { |
| 1264 | struct trap_per_cpu *tb = &trap_block[cpu]; |
| 1265 | |
| 1266 | sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, |
| 1267 | tb->cpu_mondo_pa, 0); |
| 1268 | sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO, |
| 1269 | tb->dev_mondo_pa, 0); |
| 1270 | sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR, |
| 1271 | tb->resum_mondo_pa, 0); |
| 1272 | sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR, |
| 1273 | tb->nonresum_mondo_pa, 0); |
| 1274 | } |
| 1275 | |
| 1276 | cpumask_clear_cpu(cpu, &smp_commenced_mask); |
| 1277 | membar_safe("#Sync"); |
| 1278 | |
| 1279 | local_irq_disable(); |
| 1280 | |
| 1281 | __asm__ __volatile__( |
| 1282 | "rdpr %%pstate, %0\n\t" |
| 1283 | "wrpr %0, %1, %%pstate" |
| 1284 | : "=r" (pstate) |
| 1285 | : "i" (PSTATE_IE)); |
| 1286 | |
| 1287 | while (1) |
| 1288 | barrier(); |
| 1289 | } |
| 1290 | |
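| | /* Detach this cpu from the sibling/core maps, steer interrupts away |
| | * from it, and mark it offline before it heads into cpu_play_dead(). |
| | */ |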
| 1291 | int __cpu_disable(void) |
| 1292 | { |
| 1293 | int cpu = smp_processor_id(); |
| 1294 | cpuinfo_sparc *c; |
| 1295 | int i; |
| 1296 | |
| 1297 | for_each_cpu(i, &cpu_core_map[cpu]) |
| 1298 | cpumask_clear_cpu(cpu, &cpu_core_map[i]); |
| 1299 | cpumask_clear(&cpu_core_map[cpu]); |
| 1300 | |
| 1301 | for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) |
| 1302 | cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); |
| 1303 | cpumask_clear(&per_cpu(cpu_sibling_map, cpu)); |
| 1304 | |
| 1305 | c = &cpu_data(cpu); |
| 1306 | |
| 1307 | c->core_id = 0; |
| 1308 | c->proc_id = -1; |
| 1309 | |
| 1310 | smp_wmb(); |
| 1311 | |
| 1312 | /* Make sure no interrupts point to this cpu. */ |
| 1313 | fixup_irqs(); |
| 1314 | |
| 1315 | local_irq_enable(); |
| 1316 | mdelay(1); |
| 1317 | local_irq_disable(); |
| 1318 | |
| 1319 | set_cpu_online(cpu, false); |
| 1320 | |
| 1321 | cpu_map_rebuild(); |
| 1322 | |
| 1323 | return 0; |
| 1324 | } |
| 1325 | |
| 1326 | void __cpu_die(unsigned int cpu) |
| 1327 | { |
| 1328 | int i; |
| 1329 | |
| 1330 | for (i = 0; i < 100; i++) { |
| 1331 | smp_rmb(); |
| 1332 | if (!cpumask_test_cpu(cpu, &smp_commenced_mask)) |
| 1333 | break; |
| 1334 | msleep(100); |
| 1335 | } |
| 1336 | if (cpumask_test_cpu(cpu, &smp_commenced_mask)) { |
| 1337 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); |
| 1338 | } else { |
| 1339 | #if defined(CONFIG_SUN_LDOMS) |
| 1340 | unsigned long hv_err; |
| 1341 | int limit = 100; |
| 1342 | |
| 1343 | do { |
| 1344 | hv_err = sun4v_cpu_stop(cpu); |
| 1345 | if (hv_err == HV_EOK) { |
| 1346 | set_cpu_present(cpu, false); |
| 1347 | break; |
| 1348 | } |
| 1349 | } while (--limit > 0); |
| 1350 | if (limit <= 0) { |
| 1351 | printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n", |
| 1352 | hv_err); |
| 1353 | } |
| 1354 | #endif |
| 1355 | } |
| 1356 | } |
| 1357 | #endif |
| 1358 | |
| 1359 | void __init smp_cpus_done(unsigned int max_cpus) |
| 1360 | { |
| 1361 | pcr_arch_init(); |
| 1362 | } |
| 1363 | |
| 1364 | void smp_send_reschedule(int cpu) |
| 1365 | { |
| 1366 | xcall_deliver((u64) &xcall_receive_signal, 0, 0, |
| 1367 | cpumask_of(cpu)); |
| 1368 | } |
| 1369 | |
| 1370 | void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) |
| 1371 | { |
| 1372 | clear_softint(1 << irq); |
| 1373 | scheduler_ipi(); |
| 1374 | } |
| 1375 | |
| 1376 | /* This is a nop because we capture all other cpus |
| 1377 | * anyway when making the PROM active. |
| 1378 | */ |
| 1379 | void smp_send_stop(void) |
| 1380 | { |
| 1381 | } |
| 1382 | |
| 1383 | /** |
| 1384 | * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu |
| 1385 | * @cpu: cpu to allocate for |
| 1386 | * @size: size allocation in bytes |
| 1387 | * @align: alignment |
| 1388 | * |
| 1389 | * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper |
| 1390 | * does the right thing for NUMA regardless of the current |
| 1391 | * configuration. |
| 1392 | * |
| 1393 | * RETURNS: |
| 1394 | * Pointer to the allocated area on success, NULL on failure. |
| 1395 | */ |
| 1396 | static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size, |
| 1397 | size_t align) |
| 1398 | { |
| 1399 | const unsigned long goal = __pa(MAX_DMA_ADDRESS); |
| 1400 | #ifdef CONFIG_NEED_MULTIPLE_NODES |
| 1401 | int node = cpu_to_node(cpu); |
| 1402 | void *ptr; |
| 1403 | |
| 1404 | if (!node_online(node) || !NODE_DATA(node)) { |
| 1405 | ptr = __alloc_bootmem(size, align, goal); |
| 1406 | pr_info("cpu %d has no node %d or node-local memory\n", |
| 1407 | cpu, node); |
| 1408 | pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", |
| 1409 | cpu, size, __pa(ptr)); |
| 1410 | } else { |
| 1411 | ptr = __alloc_bootmem_node(NODE_DATA(node), |
| 1412 | size, align, goal); |
| 1413 | pr_debug("per cpu data for cpu%d %lu bytes on node%d at " |
| 1414 | "%016lx\n", cpu, size, node, __pa(ptr)); |
| 1415 | } |
| 1416 | return ptr; |
| 1417 | #else |
| 1418 | return __alloc_bootmem(size, align, goal); |
| 1419 | #endif |
| 1420 | } |
| 1421 | |
| 1422 | static void __init pcpu_free_bootmem(void *ptr, size_t size) |
| 1423 | { |
| 1424 | free_bootmem(__pa(ptr), size); |
| 1425 | } |
| 1426 | |
| 1427 | static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) |
| 1428 | { |
| 1429 | if (cpu_to_node(from) == cpu_to_node(to)) |
| 1430 | return LOCAL_DISTANCE; |
| 1431 | else |
| 1432 | return REMOTE_DISTANCE; |
| 1433 | } |
| 1434 | |
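| | /* Make sure the kernel page tables cover addr for the page-based percpu |
| | * first chunk, allocating any missing PUD/PMD levels from bootmem. |
| | */ |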
| 1435 | static void __init pcpu_populate_pte(unsigned long addr) |
| 1436 | { |
| 1437 | pgd_t *pgd = pgd_offset_k(addr); |
| 1438 | pud_t *pud; |
| 1439 | pmd_t *pmd; |
| 1440 | |
| 1441 | pud = pud_offset(pgd, addr); |
| 1442 | if (pud_none(*pud)) { |
| 1443 | pmd_t *new; |
| 1444 | |
| 1445 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
| 1446 | pud_populate(&init_mm, pud, new); |
| 1447 | } |
| 1448 | |
| 1449 | pmd = pmd_offset(pud, addr); |
| 1450 | if (!pmd_present(*pmd)) { |
| 1451 | pte_t *new; |
| 1452 | |
| 1453 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); |
| 1454 | pmd_populate_kernel(&init_mm, pmd, new); |
| 1455 | } |
| 1456 | } |
| 1457 | |
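| | /* Carve out the per-cpu areas: try the embedded first-chunk allocator |
| | * (4MB units) and fall back to page-sized units if that fails, then |
| | * record each cpu's offset and load it into %g5 for the boot cpu. |
| | */ |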
| 1458 | void __init setup_per_cpu_areas(void) |
| 1459 | { |
| 1460 | unsigned long delta; |
| 1461 | unsigned int cpu; |
| 1462 | int rc = -EINVAL; |
| 1463 | |
| 1464 | if (pcpu_chosen_fc != PCPU_FC_PAGE) { |
| 1465 | rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, |
| 1466 | PERCPU_DYNAMIC_RESERVE, 4 << 20, |
| 1467 | pcpu_cpu_distance, |
| 1468 | pcpu_alloc_bootmem, |
| 1469 | pcpu_free_bootmem); |
| 1470 | if (rc) |
| 1471 | pr_warning("PERCPU: %s allocator failed (%d), " |
| 1472 | "falling back to page size\n", |
| 1473 | pcpu_fc_names[pcpu_chosen_fc], rc); |
| 1474 | } |
| 1475 | if (rc < 0) |
| 1476 | rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, |
| 1477 | pcpu_alloc_bootmem, |
| 1478 | pcpu_free_bootmem, |
| 1479 | pcpu_populate_pte); |
| 1480 | if (rc < 0) |
| 1481 | panic("cannot initialize percpu area (err=%d)", rc); |
| 1482 | |
| 1483 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; |
| 1484 | for_each_possible_cpu(cpu) |
| 1485 | __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; |
| 1486 | |
| 1487 | /* Setup %g5 for the boot cpu. */ |
| 1488 | __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); |
| 1489 | |
| 1490 | of_fill_in_cpu_data(); |
| 1491 | if (tlb_type == hypervisor) |
| 1492 | mdesc_fill_in_cpu_data(cpu_all_mask); |
| 1493 | } |