1 /* smp_64.c: Sparc64 SMP support.
2 *
3 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6 #include <linux/export.h>
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pagemap.h>
11 #include <linux/threads.h>
12 #include <linux/smp.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/delay.h>
16 #include <linux/init.h>
17 #include <linux/spinlock.h>
18 #include <linux/fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/cache.h>
21 #include <linux/jiffies.h>
22 #include <linux/profile.h>
23 #include <linux/bootmem.h>
24 #include <linux/vmalloc.h>
25 #include <linux/ftrace.h>
26 #include <linux/cpu.h>
27 #include <linux/slab.h>
28 #include <linux/kgdb.h>
29
30 #include <asm/head.h>
31 #include <asm/ptrace.h>
32 #include <linux/atomic.h>
33 #include <asm/tlbflush.h>
34 #include <asm/mmu_context.h>
35 #include <asm/cpudata.h>
36 #include <asm/hvtramp.h>
37 #include <asm/io.h>
38 #include <asm/timer.h>
39 #include <asm/setup.h>
40
41 #include <asm/irq.h>
42 #include <asm/irq_regs.h>
43 #include <asm/page.h>
44 #include <asm/pgtable.h>
45 #include <asm/oplib.h>
46 #include <asm/uaccess.h>
47 #include <asm/starfire.h>
48 #include <asm/tlb.h>
49 #include <asm/sections.h>
50 #include <asm/prom.h>
51 #include <asm/mdesc.h>
52 #include <asm/ldc.h>
53 #include <asm/hypervisor.h>
54 #include <asm/pcr.h>
55
56 #include "cpumap.h"
57 #include "kernel.h"
58
59 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
60 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
61 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
62
63 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
64 EXPORT_SYMBOL(cpu_core_map);
65
66 static cpumask_t smp_commenced_mask;
67
68 void smp_info(struct seq_file *m)
69 {
70 int i;
71
72 seq_printf(m, "State:\n");
73 for_each_online_cpu(i)
74 seq_printf(m, "CPU%d:\t\tonline\n", i);
75 }
76
77 void smp_bogo(struct seq_file *m)
78 {
79 int i;
80
81 for_each_online_cpu(i)
82 seq_printf(m,
83 "Cpu%dClkTck\t: %016lx\n",
84 i, cpu_data(i).clock_tick);
85 }
86
87 extern void setup_sparc64_timer(void);
88
89 static volatile unsigned long callin_flag = 0;
90
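/* Per-cpu entry point for a freshly started secondary cpu.  Finish
 * local setup (per-cpu offset, sun4v TSB registration, timer), signal
 * the master via callin_flag, wait for smp_commenced_mask, mark the
 * cpu online and fall into the idle loop.
 */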
91 void smp_callin(void)
92 {
93 int cpuid = hard_smp_processor_id();
94
95 __local_per_cpu_offset = __per_cpu_offset(cpuid);
96
97 if (tlb_type == hypervisor)
98 sun4v_ktsb_register();
99
100 __flush_tlb_all();
101
102 setup_sparc64_timer();
103
104 if (cheetah_pcache_forced_on)
105 cheetah_enable_pcache();
106
107 callin_flag = 1;
108 __asm__ __volatile__("membar #Sync\n\t"
109 "flush %%g6" : : : "memory");
110
111 /* Clear this or we will die instantly when we
112 * schedule back to this idler...
113 */
114 current_thread_info()->new_child = 0;
115
116 /* Attach to the address space of init_task. */
117 atomic_inc(&init_mm.mm_count);
118 current->active_mm = &init_mm;
119
120 /* inform the notifiers about the new cpu */
121 notify_cpu_starting(cpuid);
122
123 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
124 rmb();
125
126 set_cpu_online(cpuid, true);
127
128 /* idle thread is expected to have preempt disabled */
129 preempt_disable();
130
131 local_irq_enable();
132
133 cpu_startup_entry(CPUHP_ONLINE);
134 }
135
136 void cpu_panic(void)
137 {
138 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
139 panic("SMP bolixed\n");
140 }
141
142 /* This tick register synchronization scheme is taken entirely from
143 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
144 *
145 * The only change I've made is to rework it so that the master
146 * initiates the synchronization instead of the slave. -DaveM
147 */
148
149 #define MASTER 0
150 #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
151
152 #define NUM_ROUNDS 64 /* magic value */
153 #define NUM_ITERS 5 /* likewise */
154
155 static DEFINE_RAW_SPINLOCK(itc_sync_lock);
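/* Handshake words for the tick synchronization below; the SLAVE index
 * keeps the slave's word a full cache line away from the master's.
 */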
156 static unsigned long go[SLAVE + 1];
157
158 #define DEBUG_TICK_SYNC 0
159
160 static inline long get_delta (long *rt, long *master)
161 {
162 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
163 unsigned long tcenter, t0, t1, tm;
164 unsigned long i;
165
166 for (i = 0; i < NUM_ITERS; i++) {
167 t0 = tick_ops->get_tick();
168 go[MASTER] = 1;
169 membar_safe("#StoreLoad");
170 while (!(tm = go[SLAVE]))
171 rmb();
172 go[SLAVE] = 0;
173 wmb();
174 t1 = tick_ops->get_tick();
175
176 if (t1 - t0 < best_t1 - best_t0)
177 best_t0 = t0, best_t1 = t1, best_tm = tm;
178 }
179
180 *rt = best_t1 - best_t0;
181 *master = best_tm - best_t0;
182
183 /* average best_t0 and best_t1 without overflow: */
184 tcenter = (best_t0/2 + best_t1/2);
185 if (best_t0 % 2 + best_t1 % 2 == 2)
186 tcenter++;
187 return tcenter - best_tm;
188 }
189
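/* Slave side of the tick synchronization: run NUM_ROUNDS measurement
 * rounds against the master and adjust our tick register until the
 * measured offset reaches zero.
 */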
190 void smp_synchronize_tick_client(void)
191 {
192 long i, delta, adj, adjust_latency = 0, done = 0;
193 unsigned long flags, rt, master_time_stamp;
194 #if DEBUG_TICK_SYNC
195 struct {
196 long rt; /* roundtrip time */
197 long master; /* master's timestamp */
198 long diff; /* difference between midpoint and master's timestamp */
199 long lat; /* estimate of itc adjustment latency */
200 } t[NUM_ROUNDS];
201 #endif
202
203 go[MASTER] = 1;
204
205 while (go[MASTER])
206 rmb();
207
208 local_irq_save(flags);
209 {
210 for (i = 0; i < NUM_ROUNDS; i++) {
211 delta = get_delta(&rt, &master_time_stamp);
212 if (delta == 0)
213 done = 1; /* let's lock on to this... */
214
215 if (!done) {
216 if (i > 0) {
217 adjust_latency += -delta;
218 adj = -delta + adjust_latency/4;
219 } else
220 adj = -delta;
221
222 tick_ops->add_tick(adj);
223 }
224 #if DEBUG_TICK_SYNC
225 t[i].rt = rt;
226 t[i].master = master_time_stamp;
227 t[i].diff = delta;
228 t[i].lat = adjust_latency/4;
229 #endif
230 }
231 }
232 local_irq_restore(flags);
233
234 #if DEBUG_TICK_SYNC
235 for (i = 0; i < NUM_ROUNDS; i++)
236 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
237 t[i].rt, t[i].master, t[i].diff, t[i].lat);
238 #endif
239
240 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
241 "(last diff %ld cycles, maxerr %lu cycles)\n",
242 smp_processor_id(), delta, rt);
243 }
244
245 static void smp_start_sync_tick_client(int cpu);
246
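/* Master side of the tick synchronization: kick the named cpu, wait
 * for it to signal readiness, then answer each of its probes with our
 * current tick value.
 */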
247 static void smp_synchronize_one_tick(int cpu)
248 {
249 unsigned long flags, i;
250
251 go[MASTER] = 0;
252
253 smp_start_sync_tick_client(cpu);
254
255 /* wait for client to be ready */
256 while (!go[MASTER])
257 rmb();
258
259 /* now let the client proceed into his loop */
260 go[MASTER] = 0;
261 membar_safe("#StoreLoad");
262
263 raw_spin_lock_irqsave(&itc_sync_lock, flags);
264 {
265 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
266 while (!go[MASTER])
267 rmb();
268 go[MASTER] = 0;
269 wmb();
270 go[SLAVE] = tick_ops->get_tick();
271 membar_safe("#StoreLoad");
272 }
273 }
274 raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
275 }
276
277 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
278 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
279 void **descrp)
280 {
281 extern unsigned long sparc64_ttable_tl0;
282 extern unsigned long kern_locked_tte_data;
283 struct hvtramp_descr *hdesc;
284 unsigned long trampoline_ra;
285 struct trap_per_cpu *tb;
286 u64 tte_vaddr, tte_data;
287 unsigned long hv_err;
288 int i;
289
290 hdesc = kzalloc(sizeof(*hdesc) +
291 (sizeof(struct hvtramp_mapping) *
292 num_kernel_image_mappings - 1),
293 GFP_KERNEL);
294 if (!hdesc) {
295 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
296 "hvtramp_descr.\n");
297 return;
298 }
299 *descrp = hdesc;
300
301 hdesc->cpu = cpu;
302 hdesc->num_mappings = num_kernel_image_mappings;
303
304 tb = &trap_block[cpu];
305
306 hdesc->fault_info_va = (unsigned long) &tb->fault_info;
307 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
308
309 hdesc->thread_reg = thread_reg;
310
311 tte_vaddr = (unsigned long) KERNBASE;
312 tte_data = kern_locked_tte_data;
313
314 for (i = 0; i < hdesc->num_mappings; i++) {
315 hdesc->maps[i].vaddr = tte_vaddr;
316 hdesc->maps[i].tte = tte_data;
317 tte_vaddr += 0x400000;
318 tte_data += 0x400000;
319 }
320
321 trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
322
323 hv_err = sun4v_cpu_start(cpu, trampoline_ra,
324 kimage_addr_to_ra(&sparc64_ttable_tl0),
325 __pa(hdesc));
326 if (hv_err)
327 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
328 "gives error %lu\n", hv_err);
329 }
330 #endif
331
332 extern unsigned long sparc64_cpu_startup;
333
334 /* The OBP cpu startup callback truncates the 3rd arg cookie to
335 * 32-bits (I think) so to be safe we have it read the pointer
336 * contained here so we work on >4GB machines. -DaveM
337 */
338 static struct thread_info *cpu_new_thread = NULL;
339
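/* Start one secondary cpu via the LDOM hypervisor call or the OBP
 * interface, then wait up to five seconds for it to set callin_flag.
 */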
340 static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
341 {
342 unsigned long entry =
343 (unsigned long)(&sparc64_cpu_startup);
344 unsigned long cookie =
345 (unsigned long)(&cpu_new_thread);
346 void *descr = NULL;
347 int timeout, ret;
348
349 callin_flag = 0;
350 cpu_new_thread = task_thread_info(idle);
351
352 if (tlb_type == hypervisor) {
353 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
354 if (ldom_domaining_enabled)
355 ldom_startcpu_cpuid(cpu,
356 (unsigned long) cpu_new_thread,
357 &descr);
358 else
359 #endif
360 prom_startcpu_cpuid(cpu, entry, cookie);
361 } else {
362 struct device_node *dp = of_find_node_by_cpuid(cpu);
363
364 prom_startcpu(dp->phandle, entry, cookie);
365 }
366
367 for (timeout = 0; timeout < 50000; timeout++) {
368 if (callin_flag)
369 break;
370 udelay(100);
371 }
372
373 if (callin_flag) {
374 ret = 0;
375 } else {
376 printk("Processor %d is stuck.\n", cpu);
377 ret = -ENODEV;
378 }
379 cpu_new_thread = NULL;
380
381 kfree(descr);
382
383 return ret;
384 }
385
386 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
387 {
388 u64 result, target;
389 int stuck, tmp;
390
391 if (this_is_starfire) {
392 /* map to real upaid */
393 cpu = (((cpu & 0x3c) << 1) |
394 ((cpu & 0x40) >> 4) |
395 (cpu & 0x3));
396 }
397
398 target = (cpu << 14) | 0x70;
399 again:
400 /* Ok, this is the real Spitfire Errata #54.
401 * One must read back from a UDB internal register
402 * after writes to the UDB interrupt dispatch, but
403 * before the membar Sync for that write.
404 * So we use the high UDB control register (ASI 0x7f,
405 * ADDR 0x20) for the dummy read. -DaveM
406 */
407 tmp = 0x40;
408 __asm__ __volatile__(
409 "wrpr %1, %2, %%pstate\n\t"
410 "stxa %4, [%0] %3\n\t"
411 "stxa %5, [%0+%8] %3\n\t"
412 "add %0, %8, %0\n\t"
413 "stxa %6, [%0+%8] %3\n\t"
414 "membar #Sync\n\t"
415 "stxa %%g0, [%7] %3\n\t"
416 "membar #Sync\n\t"
417 "mov 0x20, %%g1\n\t"
418 "ldxa [%%g1] 0x7f, %%g0\n\t"
419 "membar #Sync"
420 : "=r" (tmp)
421 : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
422 "r" (data0), "r" (data1), "r" (data2), "r" (target),
423 "r" (0x10), "0" (tmp)
424 : "g1");
425
426 /* NOTE: PSTATE_IE is still clear. */
427 stuck = 100000;
428 do {
429 __asm__ __volatile__("ldxa [%%g0] %1, %0"
430 : "=r" (result)
431 : "i" (ASI_INTR_DISPATCH_STAT));
432 if (result == 0) {
433 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
434 : : "r" (pstate));
435 return;
436 }
437 stuck -= 1;
438 if (stuck == 0)
439 break;
440 } while (result & 0x1);
441 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
442 : : "r" (pstate));
443 if (stuck == 0) {
444 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
445 smp_processor_id(), result);
446 } else {
447 udelay(2);
448 goto again;
449 }
450 }
451
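/* Deliver the mondo data to each cpu in the list, one dispatch per
 * target cpu.
 */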
452 static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
453 {
454 u64 *mondo, data0, data1, data2;
455 u16 *cpu_list;
456 u64 pstate;
457 int i;
458
459 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
460 cpu_list = __va(tb->cpu_list_pa);
461 mondo = __va(tb->cpu_mondo_block_pa);
462 data0 = mondo[0];
463 data1 = mondo[1];
464 data2 = mondo[2];
465 for (i = 0; i < cnt; i++)
466 spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
467 }
468
469 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
470 * packet, but we have no use for that. However we do take advantage of
471 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
472 */
473 static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
474 {
475 int nack_busy_id, is_jbus, need_more;
476 u64 *mondo, pstate, ver, busy_mask;
477 u16 *cpu_list;
478
479 cpu_list = __va(tb->cpu_list_pa);
480 mondo = __va(tb->cpu_mondo_block_pa);
481
482 /* Unfortunately, someone at Sun had the brilliant idea to make the
483 * busy/nack fields hard-coded by ITID number for this Ultra-III
484 * derivative processor.
485 */
486 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
487 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
488 (ver >> 32) == __SERRANO_ID);
489
490 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
491
492 retry:
493 need_more = 0;
494 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
495 : : "r" (pstate), "i" (PSTATE_IE));
496
497 /* Setup the dispatch data registers. */
498 __asm__ __volatile__("stxa %0, [%3] %6\n\t"
499 "stxa %1, [%4] %6\n\t"
500 "stxa %2, [%5] %6\n\t"
501 "membar #Sync\n\t"
502 : /* no outputs */
503 : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
504 "r" (0x40), "r" (0x50), "r" (0x60),
505 "i" (ASI_INTR_W));
506
507 nack_busy_id = 0;
508 busy_mask = 0;
509 {
510 int i;
511
512 for (i = 0; i < cnt; i++) {
513 u64 target, nr;
514
515 nr = cpu_list[i];
516 if (nr == 0xffff)
517 continue;
518
519 target = (nr << 14) | 0x70;
520 if (is_jbus) {
521 busy_mask |= (0x1UL << (nr * 2));
522 } else {
523 target |= (nack_busy_id << 24);
524 busy_mask |= (0x1UL <<
525 (nack_busy_id * 2));
526 }
527 __asm__ __volatile__(
528 "stxa %%g0, [%0] %1\n\t"
529 "membar #Sync\n\t"
530 : /* no outputs */
531 : "r" (target), "i" (ASI_INTR_W));
532 nack_busy_id++;
533 if (nack_busy_id == 32) {
534 need_more = 1;
535 break;
536 }
537 }
538 }
539
540 /* Now, poll for completion. */
541 {
542 u64 dispatch_stat, nack_mask;
543 long stuck;
544
545 stuck = 100000 * nack_busy_id;
546 nack_mask = busy_mask << 1;
547 do {
548 __asm__ __volatile__("ldxa [%%g0] %1, %0"
549 : "=r" (dispatch_stat)
550 : "i" (ASI_INTR_DISPATCH_STAT));
551 if (!(dispatch_stat & (busy_mask | nack_mask))) {
552 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
553 : : "r" (pstate));
554 if (unlikely(need_more)) {
555 int i, this_cnt = 0;
556 for (i = 0; i < cnt; i++) {
557 if (cpu_list[i] == 0xffff)
558 continue;
559 cpu_list[i] = 0xffff;
560 this_cnt++;
561 if (this_cnt == 32)
562 break;
563 }
564 goto retry;
565 }
566 return;
567 }
568 if (!--stuck)
569 break;
570 } while (dispatch_stat & busy_mask);
571
572 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
573 : : "r" (pstate));
574
575 if (dispatch_stat & busy_mask) {
576 /* Busy bits will not clear, continue instead
577 * of freezing up on this cpu.
578 */
579 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
580 smp_processor_id(), dispatch_stat);
581 } else {
582 int i, this_busy_nack = 0;
583
584 /* Delay some random time with interrupts enabled
585 * to prevent deadlock.
586 */
587 udelay(2 * nack_busy_id);
588
589 /* Clear out the mask bits for cpus which did not
590 * NACK us.
591 */
592 for (i = 0; i < cnt; i++) {
593 u64 check_mask, nr;
594
595 nr = cpu_list[i];
596 if (nr == 0xffff)
597 continue;
598
599 if (is_jbus)
600 check_mask = (0x2UL << (2*nr));
601 else
602 check_mask = (0x2UL <<
603 this_busy_nack);
604 if ((dispatch_stat & check_mask) == 0)
605 cpu_list[i] = 0xffff;
606 this_busy_nack += 2;
607 if (this_busy_nack == 64)
608 break;
609 }
610
611 goto retry;
612 }
613 }
614 }
615
616 /* Multi-cpu list version. */
617 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
618 {
619 int retries, this_cpu, prev_sent, i, saw_cpu_error;
620 unsigned long status;
621 u16 *cpu_list;
622
623 this_cpu = smp_processor_id();
624
625 cpu_list = __va(tb->cpu_list_pa);
626
627 saw_cpu_error = 0;
628 retries = 0;
629 prev_sent = 0;
630 do {
631 int forward_progress, n_sent;
632
633 status = sun4v_cpu_mondo_send(cnt,
634 tb->cpu_list_pa,
635 tb->cpu_mondo_block_pa);
636
637 /* HV_EOK means all cpus received the xcall, we're done. */
638 if (likely(status == HV_EOK))
639 break;
640
641 /* First, see if we made any forward progress.
642 *
643 * The hypervisor indicates successful sends by setting
644 * cpu list entries to the value 0xffff.
645 */
646 n_sent = 0;
647 for (i = 0; i < cnt; i++) {
648 if (likely(cpu_list[i] == 0xffff))
649 n_sent++;
650 }
651
652 forward_progress = 0;
653 if (n_sent > prev_sent)
654 forward_progress = 1;
655
656 prev_sent = n_sent;
657
658 /* If we get a HV_ECPUERROR, then one or more of the cpus
659 * in the list are in error state. Use the cpu_state()
660 * hypervisor call to find out which cpus are in error state.
661 */
662 if (unlikely(status == HV_ECPUERROR)) {
663 for (i = 0; i < cnt; i++) {
664 long err;
665 u16 cpu;
666
667 cpu = cpu_list[i];
668 if (cpu == 0xffff)
669 continue;
670
671 err = sun4v_cpu_state(cpu);
672 if (err == HV_CPU_STATE_ERROR) {
673 saw_cpu_error = (cpu + 1);
674 cpu_list[i] = 0xffff;
675 }
676 }
677 } else if (unlikely(status != HV_EWOULDBLOCK))
678 goto fatal_mondo_error;
679
680 /* Don't bother rewriting the CPU list, just leave the
681 * 0xffff and non-0xffff entries in there and the
682 * hypervisor will do the right thing.
683 *
684 * Only advance timeout state if we didn't make any
685 * forward progress.
686 */
687 if (unlikely(!forward_progress)) {
688 if (unlikely(++retries > 10000))
689 goto fatal_mondo_timeout;
690
691 /* Delay a little bit to let other cpus catch up
692 * on their cpu mondo queue work.
693 */
694 udelay(2 * cnt);
695 }
696 } while (1);
697
698 if (unlikely(saw_cpu_error))
699 goto fatal_mondo_cpu_error;
700
701 return;
702
703 fatal_mondo_cpu_error:
704 printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
705 "(including %d) were in error state\n",
706 this_cpu, saw_cpu_error - 1);
707 return;
708
709 fatal_mondo_timeout:
710 printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
711 " progress after %d retries.\n",
712 this_cpu, retries);
713 goto dump_cpu_list_and_out;
714
715 fatal_mondo_error:
716 printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
717 this_cpu, status);
718 printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
719 "mondo_block_pa(%lx)\n",
720 this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
721
722 dump_cpu_list_and_out:
723 printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
724 for (i = 0; i < cnt; i++)
725 printk("%u ", cpu_list[i]);
726 printk("]\n");
727 }
728
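/* Low-level mondo delivery method for this cpu type, selected at boot
 * in smp_setup_processor_id().
 */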
729 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
730
731 static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
732 {
733 struct trap_per_cpu *tb;
734 int this_cpu, i, cnt;
735 unsigned long flags;
736 u16 *cpu_list;
737 u64 *mondo;
738
739 /* We have to do this whole thing with interrupts fully disabled.
740 * Otherwise if we send an xcall from interrupt context it will
741 * corrupt both our mondo block and cpu list state.
742 *
743 * One consequence of this is that we cannot use timeout mechanisms
744 * that depend upon interrupts being delivered locally. So, for
745 * example, we cannot sample jiffies and expect it to advance.
746 *
747 * Fortunately, udelay() uses %stick/%tick so we can use that.
748 */
749 local_irq_save(flags);
750
751 this_cpu = smp_processor_id();
752 tb = &trap_block[this_cpu];
753
754 mondo = __va(tb->cpu_mondo_block_pa);
755 mondo[0] = data0;
756 mondo[1] = data1;
757 mondo[2] = data2;
758 wmb();
759
760 cpu_list = __va(tb->cpu_list_pa);
761
762 /* Setup the initial cpu list. */
763 cnt = 0;
764 for_each_cpu(i, mask) {
765 if (i == this_cpu || !cpu_online(i))
766 continue;
767 cpu_list[cnt++] = i;
768 }
769
770 if (cnt)
771 xcall_deliver_impl(tb, cnt);
772
773 local_irq_restore(flags);
774 }
775
776 /* Send cross call to all processors mentioned in MASK
777 * except self. Really, there are only two cases currently,
778 * "cpu_online_mask" and "mm_cpumask(mm)".
779 */
780 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
781 {
782 u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
783
784 xcall_deliver(data0, data1, data2, mask);
785 }
786
787 /* Send cross call to all processors except self. */
788 static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
789 {
790 smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
791 }
792
793 extern unsigned long xcall_sync_tick;
794
795 static void smp_start_sync_tick_client(int cpu)
796 {
797 xcall_deliver((u64) &xcall_sync_tick, 0, 0,
798 cpumask_of(cpu));
799 }
800
801 extern unsigned long xcall_call_function;
802
803 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
804 {
805 xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
806 }
807
808 extern unsigned long xcall_call_function_single;
809
810 void arch_send_call_function_single_ipi(int cpu)
811 {
812 xcall_deliver((u64) &xcall_call_function_single, 0, 0,
813 cpumask_of(cpu));
814 }
815
816 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
817 {
818 clear_softint(1 << irq);
819 generic_smp_call_function_interrupt();
820 }
821
822 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
823 {
824 clear_softint(1 << irq);
825 generic_smp_call_function_single_interrupt();
826 }
827
828 static void tsb_sync(void *info)
829 {
830 struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
831 struct mm_struct *mm = info;
832
833 /* It is not valid to test "current->active_mm == mm" here.
834 *
835 * The value of "current" is not changed atomically with
836 * switch_mm(). But that's OK, we just need to check the
837 * current cpu's trap block PGD physical address.
838 */
839 if (tp->pgd_paddr == __pa(mm->pgd))
840 tsb_context_switch(mm);
841 }
842
843 void smp_tsb_sync(struct mm_struct *mm)
844 {
845 smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
846 }
847
848 extern unsigned long xcall_flush_tlb_mm;
849 extern unsigned long xcall_flush_tlb_page;
850 extern unsigned long xcall_flush_tlb_kernel_range;
851 extern unsigned long xcall_fetch_glob_regs;
852 extern unsigned long xcall_fetch_glob_pmu;
853 extern unsigned long xcall_fetch_glob_pmu_n4;
854 extern unsigned long xcall_receive_signal;
855 extern unsigned long xcall_new_mmu_context_version;
856 #ifdef CONFIG_KGDB
857 extern unsigned long xcall_kgdb_capture;
858 #endif
859
860 #ifdef DCACHE_ALIASING_POSSIBLE
861 extern unsigned long xcall_flush_dcache_page_cheetah;
862 #endif
863 extern unsigned long xcall_flush_dcache_page_spitfire;
864
865 static inline void __local_flush_dcache_page(struct page *page)
866 {
867 #ifdef DCACHE_ALIASING_POSSIBLE
868 __flush_dcache_page(page_address(page),
869 ((tlb_type == spitfire) &&
870 page_mapping(page) != NULL));
871 #else
872 if (page_mapping(page) != NULL &&
873 tlb_type == spitfire)
874 __flush_icache_page(__pa(page_address(page)));
875 #endif
876 }
877
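/* Flush the D-cache copy of PAGE on cpu CPU, locally when possible and
 * via a cross call otherwise.  Nothing to do on sun4v.
 */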
878 void smp_flush_dcache_page_impl(struct page *page, int cpu)
879 {
880 int this_cpu;
881
882 if (tlb_type == hypervisor)
883 return;
884
885 #ifdef CONFIG_DEBUG_DCFLUSH
886 atomic_inc(&dcpage_flushes);
887 #endif
888
889 this_cpu = get_cpu();
890
891 if (cpu == this_cpu) {
892 __local_flush_dcache_page(page);
893 } else if (cpu_online(cpu)) {
894 void *pg_addr = page_address(page);
895 u64 data0 = 0;
896
897 if (tlb_type == spitfire) {
898 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
899 if (page_mapping(page) != NULL)
900 data0 |= ((u64)1 << 32);
901 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
902 #ifdef DCACHE_ALIASING_POSSIBLE
903 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
904 #endif
905 }
906 if (data0) {
907 xcall_deliver(data0, __pa(pg_addr),
908 (u64) pg_addr, cpumask_of(cpu));
909 #ifdef CONFIG_DEBUG_DCFLUSH
910 atomic_inc(&dcpage_flushes_xcall);
911 #endif
912 }
913 }
914
915 put_cpu();
916 }
917
918 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
919 {
920 void *pg_addr;
921 u64 data0;
922
923 if (tlb_type == hypervisor)
924 return;
925
926 preempt_disable();
927
928 #ifdef CONFIG_DEBUG_DCFLUSH
929 atomic_inc(&dcpage_flushes);
930 #endif
931 data0 = 0;
932 pg_addr = page_address(page);
933 if (tlb_type == spitfire) {
934 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
935 if (page_mapping(page) != NULL)
936 data0 |= ((u64)1 << 32);
937 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
938 #ifdef DCACHE_ALIASING_POSSIBLE
939 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
940 #endif
941 }
942 if (data0) {
943 xcall_deliver(data0, __pa(pg_addr),
944 (u64) pg_addr, cpu_online_mask);
945 #ifdef CONFIG_DEBUG_DCFLUSH
946 atomic_inc(&dcpage_flushes_xcall);
947 #endif
948 }
949 __local_flush_dcache_page(page);
950
951 preempt_enable();
952 }
953
954 void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
955 {
956 struct mm_struct *mm;
957 unsigned long flags;
958
959 clear_softint(1 << irq);
960
961 /* See if we need to allocate a new TLB context because
962 * the version of the one we are using is now out of date.
963 */
964 mm = current->active_mm;
965 if (unlikely(!mm || (mm == &init_mm)))
966 return;
967
968 spin_lock_irqsave(&mm->context.lock, flags);
969
970 if (unlikely(!CTX_VALID(mm->context)))
971 get_new_mmu_context(mm);
972
973 spin_unlock_irqrestore(&mm->context.lock, flags);
974
975 load_secondary_context(mm);
976 __flush_tlb_mm(CTX_HWBITS(mm->context),
977 SECONDARY_CONTEXT);
978 }
979
980 void smp_new_mmu_context_version(void)
981 {
982 smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
983 }
984
985 #ifdef CONFIG_KGDB
986 void kgdb_roundup_cpus(unsigned long flags)
987 {
988 smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
989 }
990 #endif
991
992 void smp_fetch_global_regs(void)
993 {
994 smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
995 }
996
997 void smp_fetch_global_pmu(void)
998 {
999 if (tlb_type == hypervisor &&
1000 sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
1001 smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
1002 else
1003 smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
1004 }
1005
1006 /* We know that the window frames of the user have been flushed
1007 * to the stack before we get here because all callers of us
1008 * are flush_tlb_*() routines, and these run after flush_cache_*()
1009 * which performs the flushw.
1010 *
1011 * The SMP TLB coherency scheme we use works as follows:
1012 *
1013 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1014 * space has (potentially) executed on, this is the heuristic
1015 * we use to avoid doing cross calls.
1016 *
1017 * Also, for flushing from kswapd and also for clones, we
1018 * use cpu_vm_mask as the list of cpus on which to run the TLB flush.
1019 *
1020 * 2) TLB context numbers are shared globally across all processors
1021 * in the system, this allows us to play several games to avoid
1022 * cross calls.
1023 *
1024 * One invariant is that when a cpu switches to a process, and
1025 * that process's tsk->active_mm->cpu_vm_mask does not have the
1026 * current cpu's bit set, that tlb context is flushed locally.
1027 *
1028 * If the address space is non-shared (ie. mm->count == 1) we avoid
1029 * cross calls when we want to flush the currently running process's
1030 * tlb state. This is done by clearing all cpu bits except the current
1031 * processor's in current->mm->cpu_vm_mask and performing the
1032 * flush locally only. This will force any subsequent cpus which run
1033 * this task to flush the context from the local tlb if the process
1034 * migrates to another cpu (again).
1035 *
1036 * 3) For shared address spaces (threads) and swapping we bite the
1037 * bullet for most cases and perform the cross call (but only to
1038 * the cpus listed in cpu_vm_mask).
1039 *
1040 * The performance gain from "optimizing" away the cross call for threads is
1041 * questionable (in theory the big win for threads is the massive sharing of
1042 * address space state across processors).
1043 */
1044
1045 /* This currently is only used by the hugetlb arch pre-fault
1046 * hook on UltraSPARC-III+ and later when changing the pagesize
1047 * bits of the context register for an address space.
1048 */
1049 void smp_flush_tlb_mm(struct mm_struct *mm)
1050 {
1051 u32 ctx = CTX_HWBITS(mm->context);
1052 int cpu = get_cpu();
1053
1054 if (atomic_read(&mm->mm_users) == 1) {
1055 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1056 goto local_flush_and_out;
1057 }
1058
1059 smp_cross_call_masked(&xcall_flush_tlb_mm,
1060 ctx, 0, 0,
1061 mm_cpumask(mm));
1062
1063 local_flush_and_out:
1064 __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1065
1066 put_cpu();
1067 }
1068
1069 struct tlb_pending_info {
1070 unsigned long ctx;
1071 unsigned long nr;
1072 unsigned long *vaddrs;
1073 };
1074
1075 static void tlb_pending_func(void *info)
1076 {
1077 struct tlb_pending_info *t = info;
1078
1079 __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
1080 }
1081
1082 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1083 {
1084 u32 ctx = CTX_HWBITS(mm->context);
1085 struct tlb_pending_info info;
1086 int cpu = get_cpu();
1087
1088 info.ctx = ctx;
1089 info.nr = nr;
1090 info.vaddrs = vaddrs;
1091
1092 if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1093 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1094 else
1095 smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
1096 &info, 1);
1097
1098 __flush_tlb_pending(ctx, nr, vaddrs);
1099
1100 put_cpu();
1101 }
1102
1103 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
1104 {
1105 unsigned long context = CTX_HWBITS(mm->context);
1106 int cpu = get_cpu();
1107
1108 if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1109 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1110 else
1111 smp_cross_call_masked(&xcall_flush_tlb_page,
1112 context, vaddr, 0,
1113 mm_cpumask(mm));
1114 __flush_tlb_page(context, vaddr);
1115
1116 put_cpu();
1117 }
1118
1119 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1120 {
1121 start &= PAGE_MASK;
1122 end = PAGE_ALIGN(end);
1123 if (start != end) {
1124 smp_cross_call(&xcall_flush_tlb_kernel_range,
1125 0, start, end);
1126
1127 __flush_tlb_kernel_range(start, end);
1128 }
1129 }
1130
1131 /* CPU capture. */
1132 /* #define CAPTURE_DEBUG */
1133 extern unsigned long xcall_capture;
1134
1135 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1136 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1137 static unsigned long penguins_are_doing_time;
1138
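/* Spin all other online cpus in the penguin jailcell (see
 * smp_penguin_jailcell) so the caller can safely make the PROM
 * active; undone by smp_release().
 */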
1139 void smp_capture(void)
1140 {
1141 int result = atomic_add_ret(1, &smp_capture_depth);
1142
1143 if (result == 1) {
1144 int ncpus = num_online_cpus();
1145
1146 #ifdef CAPTURE_DEBUG
1147 printk("CPU[%d]: Sending penguins to jail...",
1148 smp_processor_id());
1149 #endif
1150 penguins_are_doing_time = 1;
1151 atomic_inc(&smp_capture_registry);
1152 smp_cross_call(&xcall_capture, 0, 0, 0);
1153 while (atomic_read(&smp_capture_registry) != ncpus)
1154 rmb();
1155 #ifdef CAPTURE_DEBUG
1156 printk("done\n");
1157 #endif
1158 }
1159 }
1160
1161 void smp_release(void)
1162 {
1163 if (atomic_dec_and_test(&smp_capture_depth)) {
1164 #ifdef CAPTURE_DEBUG
1165 printk("CPU[%d]: Giving pardon to "
1166 "imprisoned penguins\n",
1167 smp_processor_id());
1168 #endif
1169 penguins_are_doing_time = 0;
1170 membar_safe("#StoreLoad");
1171 atomic_dec(&smp_capture_registry);
1172 }
1173 }
1174
1175 /* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
1176 * set, so they can service tlb flush xcalls...
1177 */
1178 extern void prom_world(int);
1179
1180 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1181 {
1182 clear_softint(1 << irq);
1183
1184 preempt_disable();
1185
1186 __asm__ __volatile__("flushw");
1187 prom_world(1);
1188 atomic_inc(&smp_capture_registry);
1189 membar_safe("#StoreLoad");
1190 while (penguins_are_doing_time)
1191 rmb();
1192 atomic_dec(&smp_capture_registry);
1193 prom_world(0);
1194
1195 preempt_enable();
1196 }
1197
1198 /* /proc/profile writes can call this, don't __init it please. */
1199 int setup_profiling_timer(unsigned int multiplier)
1200 {
1201 return -EINVAL;
1202 }
1203
1204 void __init smp_prepare_cpus(unsigned int max_cpus)
1205 {
1206 }
1207
1208 void smp_prepare_boot_cpu(void)
1209 {
1210 }
1211
1212 void __init smp_setup_processor_id(void)
1213 {
1214 if (tlb_type == spitfire)
1215 xcall_deliver_impl = spitfire_xcall_deliver;
1216 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1217 xcall_deliver_impl = cheetah_xcall_deliver;
1218 else
1219 xcall_deliver_impl = hypervisor_xcall_deliver;
1220 }
1221
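/* Rebuild cpu_core_map and cpu_sibling_map for every present cpu from
 * its core_id and proc_id values.
 */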
1222 void smp_fill_in_sib_core_maps(void)
1223 {
1224 unsigned int i;
1225
1226 for_each_present_cpu(i) {
1227 unsigned int j;
1228
1229 cpumask_clear(&cpu_core_map[i]);
1230 if (cpu_data(i).core_id == 0) {
1231 cpumask_set_cpu(i, &cpu_core_map[i]);
1232 continue;
1233 }
1234
1235 for_each_present_cpu(j) {
1236 if (cpu_data(i).core_id ==
1237 cpu_data(j).core_id)
1238 cpumask_set_cpu(j, &cpu_core_map[i]);
1239 }
1240 }
1241
1242 for_each_present_cpu(i) {
1243 unsigned int j;
1244
1245 cpumask_clear(&per_cpu(cpu_sibling_map, i));
1246 if (cpu_data(i).proc_id == -1) {
1247 cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1248 continue;
1249 }
1250
1251 for_each_present_cpu(j) {
1252 if (cpu_data(i).proc_id ==
1253 cpu_data(j).proc_id)
1254 cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1255 }
1256 }
1257 }
1258
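/* Boot the named cpu, let it join smp_commenced_mask and, on non-sun4v
 * systems, synchronize its tick register with ours.
 */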
1259 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1260 {
1261 int ret = smp_boot_one_cpu(cpu, tidle);
1262
1263 if (!ret) {
1264 cpumask_set_cpu(cpu, &smp_commenced_mask);
1265 while (!cpu_online(cpu))
1266 mb();
1267 if (!cpu_online(cpu)) {
1268 ret = -ENODEV;
1269 } else {
1270 /* On SUN4V, writes to %tick and %stick are
1271 * not allowed.
1272 */
1273 if (tlb_type != hypervisor)
1274 smp_synchronize_one_tick(cpu);
1275 }
1276 }
1277 return ret;
1278 }
1279
1280 #ifdef CONFIG_HOTPLUG_CPU
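/* Last code run by a cpu going offline: unconfigure its sun4v mondo
 * queues, drop out of smp_commenced_mask and spin with interrupts
 * disabled until the cpu is stopped.
 */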
1281 void cpu_play_dead(void)
1282 {
1283 int cpu = smp_processor_id();
1284 unsigned long pstate;
1285
1286 idle_task_exit();
1287
1288 if (tlb_type == hypervisor) {
1289 struct trap_per_cpu *tb = &trap_block[cpu];
1290
1291 sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1292 tb->cpu_mondo_pa, 0);
1293 sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1294 tb->dev_mondo_pa, 0);
1295 sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1296 tb->resum_mondo_pa, 0);
1297 sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1298 tb->nonresum_mondo_pa, 0);
1299 }
1300
1301 cpumask_clear_cpu(cpu, &smp_commenced_mask);
1302 membar_safe("#Sync");
1303
1304 local_irq_disable();
1305
1306 __asm__ __volatile__(
1307 "rdpr %%pstate, %0\n\t"
1308 "wrpr %0, %1, %%pstate"
1309 : "=r" (pstate)
1310 : "i" (PSTATE_IE));
1311
1312 while (1)
1313 barrier();
1314 }
1315
1316 int __cpu_disable(void)
1317 {
1318 int cpu = smp_processor_id();
1319 cpuinfo_sparc *c;
1320 int i;
1321
1322 for_each_cpu(i, &cpu_core_map[cpu])
1323 cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1324 cpumask_clear(&cpu_core_map[cpu]);
1325
1326 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1327 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1328 cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1329
1330 c = &cpu_data(cpu);
1331
1332 c->core_id = 0;
1333 c->proc_id = -1;
1334
1335 smp_wmb();
1336
1337 /* Make sure no interrupts point to this cpu. */
1338 fixup_irqs();
1339
1340 local_irq_enable();
1341 mdelay(1);
1342 local_irq_disable();
1343
1344 set_cpu_online(cpu, false);
1345
1346 cpu_map_rebuild();
1347
1348 return 0;
1349 }
1350
1351 void __cpu_die(unsigned int cpu)
1352 {
1353 int i;
1354
1355 for (i = 0; i < 100; i++) {
1356 smp_rmb();
1357 if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1358 break;
1359 msleep(100);
1360 }
1361 if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1362 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1363 } else {
1364 #if defined(CONFIG_SUN_LDOMS)
1365 unsigned long hv_err;
1366 int limit = 100;
1367
1368 do {
1369 hv_err = sun4v_cpu_stop(cpu);
1370 if (hv_err == HV_EOK) {
1371 set_cpu_present(cpu, false);
1372 break;
1373 }
1374 } while (--limit > 0);
1375 if (limit <= 0) {
1376 printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1377 hv_err);
1378 }
1379 #endif
1380 }
1381 }
1382 #endif
1383
1384 void __init smp_cpus_done(unsigned int max_cpus)
1385 {
1386 pcr_arch_init();
1387 }
1388
1389 void smp_send_reschedule(int cpu)
1390 {
1391 if (cpu == smp_processor_id()) {
1392 WARN_ON_ONCE(preemptible());
1393 set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1394 } else {
1395 xcall_deliver((u64) &xcall_receive_signal,
1396 0, 0, cpumask_of(cpu));
1397 }
1398 }
1399
1400 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1401 {
1402 clear_softint(1 << irq);
1403 scheduler_ipi();
1404 }
1405
1406 /* This is a nop because we capture all other cpus
1407 * anyway when making the PROM active.
1408 */
1409 void smp_send_stop(void)
1410 {
1411 }
1412
1413 /**
1414 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
1415 * @cpu: cpu to allocate for
1416 * @size: size allocation in bytes
1417 * @align: alignment
1418 *
1419 * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
1420 * does the right thing for NUMA regardless of the current
1421 * configuration.
1422 *
1423 * RETURNS:
1424 * Pointer to the allocated area on success, NULL on failure.
1425 */
1426 static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1427 size_t align)
1428 {
1429 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
1430 #ifdef CONFIG_NEED_MULTIPLE_NODES
1431 int node = cpu_to_node(cpu);
1432 void *ptr;
1433
1434 if (!node_online(node) || !NODE_DATA(node)) {
1435 ptr = __alloc_bootmem(size, align, goal);
1436 pr_info("cpu %d has no node %d or node-local memory\n",
1437 cpu, node);
1438 pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
1439 cpu, size, __pa(ptr));
1440 } else {
1441 ptr = __alloc_bootmem_node(NODE_DATA(node),
1442 size, align, goal);
1443 pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
1444 "%016lx\n", cpu, size, node, __pa(ptr));
1445 }
1446 return ptr;
1447 #else
1448 return __alloc_bootmem(size, align, goal);
1449 #endif
1450 }
1451
1452 static void __init pcpu_free_bootmem(void *ptr, size_t size)
1453 {
1454 free_bootmem(__pa(ptr), size);
1455 }
1456
1457 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1458 {
1459 if (cpu_to_node(from) == cpu_to_node(to))
1460 return LOCAL_DISTANCE;
1461 else
1462 return REMOTE_DISTANCE;
1463 }
1464
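/* Populate any missing intermediate page table levels for ADDR using
 * bootmem allocations.
 */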
1465 static void __init pcpu_populate_pte(unsigned long addr)
1466 {
1467 pgd_t *pgd = pgd_offset_k(addr);
1468 pud_t *pud;
1469 pmd_t *pmd;
1470
1471 pud = pud_offset(pgd, addr);
1472 if (pud_none(*pud)) {
1473 pmd_t *new;
1474
1475 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1476 pud_populate(&init_mm, pud, new);
1477 }
1478
1479 pmd = pmd_offset(pud, addr);
1480 if (!pmd_present(*pmd)) {
1481 pte_t *new;
1482
1483 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1484 pmd_populate_kernel(&init_mm, pmd, new);
1485 }
1486 }
1487
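/* Set up the per-cpu areas: try the embed first-chunk allocator first,
 * fall back to page mapping, then record the per-cpu offsets and fill
 * in the cpu data from the firmware device tree / machine description.
 */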
1488 void __init setup_per_cpu_areas(void)
1489 {
1490 unsigned long delta;
1491 unsigned int cpu;
1492 int rc = -EINVAL;
1493
1494 if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1495 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1496 PERCPU_DYNAMIC_RESERVE, 4 << 20,
1497 pcpu_cpu_distance,
1498 pcpu_alloc_bootmem,
1499 pcpu_free_bootmem);
1500 if (rc)
1501 pr_warning("PERCPU: %s allocator failed (%d), "
1502 "falling back to page size\n",
1503 pcpu_fc_names[pcpu_chosen_fc], rc);
1504 }
1505 if (rc < 0)
1506 rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1507 pcpu_alloc_bootmem,
1508 pcpu_free_bootmem,
1509 pcpu_populate_pte);
1510 if (rc < 0)
1511 panic("cannot initialize percpu area (err=%d)", rc);
1512
1513 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1514 for_each_possible_cpu(cpu)
1515 __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1516
1517 /* Setup %g5 for the boot cpu. */
1518 __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1519
1520 of_fill_in_cpu_data();
1521 if (tlb_type == hypervisor)
1522 mdesc_fill_in_cpu_data(cpu_all_mask);
1523 }