[SPARC64]: dr-cpu unconfigure support.
[deliverable/linux.git] arch/sparc64/kernel/smp.c
1 /* smp.c: Sparc64 SMP support.
2 *
3 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pagemap.h>
11 #include <linux/threads.h>
12 #include <linux/smp.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/delay.h>
16 #include <linux/init.h>
17 #include <linux/spinlock.h>
18 #include <linux/fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/cache.h>
21 #include <linux/jiffies.h>
22 #include <linux/profile.h>
23 #include <linux/bootmem.h>
24
25 #include <asm/head.h>
26 #include <asm/ptrace.h>
27 #include <asm/atomic.h>
28 #include <asm/tlbflush.h>
29 #include <asm/mmu_context.h>
30 #include <asm/cpudata.h>
31 #include <asm/hvtramp.h>
32 #include <asm/io.h>
33
34 #include <asm/irq.h>
35 #include <asm/irq_regs.h>
36 #include <asm/page.h>
37 #include <asm/pgtable.h>
38 #include <asm/oplib.h>
39 #include <asm/uaccess.h>
40 #include <asm/timer.h>
41 #include <asm/starfire.h>
42 #include <asm/tlb.h>
43 #include <asm/sections.h>
44 #include <asm/prom.h>
45 #include <asm/mdesc.h>
46 #include <asm/ldc.h>
47 #include <asm/hypervisor.h>
48
49 extern void calibrate_delay(void);
50
51 int sparc64_multi_core __read_mostly;
52
53 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
54 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
55 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
56 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
57 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
58 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
59
60 EXPORT_SYMBOL(cpu_possible_map);
61 EXPORT_SYMBOL(cpu_online_map);
62 EXPORT_SYMBOL(cpu_sibling_map);
63 EXPORT_SYMBOL(cpu_core_map);
64
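/* Bitmask of cpus the boot processor has released into the kernel.
 * A secondary spins in smp_callin() until its bit shows up here, and
 * cpu_play_dead() clears the bit again on the way down.
 */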
65 static cpumask_t smp_commenced_mask;
66
67 void smp_info(struct seq_file *m)
68 {
69 int i;
70
71 seq_printf(m, "State:\n");
72 for_each_online_cpu(i)
73 seq_printf(m, "CPU%d:\t\tonline\n", i);
74 }
75
76 void smp_bogo(struct seq_file *m)
77 {
78 int i;
79
80 for_each_online_cpu(i)
81 seq_printf(m,
82 "Cpu%dClkTck\t: %016lx\n",
83 i, cpu_data(i).clock_tick);
84 }
85
86 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
87
88 extern void setup_sparc64_timer(void);
89
90 static volatile unsigned long callin_flag = 0;
91
92 void __devinit smp_callin(void)
93 {
94 int cpuid = hard_smp_processor_id();
95
96 __local_per_cpu_offset = __per_cpu_offset(cpuid);
97
98 if (tlb_type == hypervisor)
99 sun4v_ktsb_register();
100
101 __flush_tlb_all();
102
103 setup_sparc64_timer();
104
105 if (cheetah_pcache_forced_on)
106 cheetah_enable_pcache();
107
108 local_irq_enable();
109
110 callin_flag = 1;
111 __asm__ __volatile__("membar #Sync\n\t"
112 "flush %%g6" : : : "memory");
113
114 /* Clear this or we will die instantly when we
115 * schedule back to this idler...
116 */
117 current_thread_info()->new_child = 0;
118
119 /* Attach to the address space of init_task. */
120 atomic_inc(&init_mm.mm_count);
121 current->active_mm = &init_mm;
122
123 while (!cpu_isset(cpuid, smp_commenced_mask))
124 rmb();
125
126 spin_lock(&call_lock);
127 cpu_set(cpuid, cpu_online_map);
128 spin_unlock(&call_lock);
129
130 /* idle thread is expected to have preempt disabled */
131 preempt_disable();
132 }
133
134 void cpu_panic(void)
135 {
136 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
137 panic("SMP bolixed\n");
138 }
139
140 /* This tick register synchronization scheme is taken entirely from
141 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
142 *
143 * The only change I've made is to rework it so that the master
144  * initiates the synchronization instead of the slave.  -DaveM
145 */
146
147 #define MASTER 0
148 #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
149
150 #define NUM_ROUNDS 64 /* magic value */
151 #define NUM_ITERS 5 /* likewise */
152
153 static DEFINE_SPINLOCK(itc_sync_lock);
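/* Handshake flags.  go[MASTER] sits at the start of the array and
 * go[SLAVE] one full cache line later (SLAVE is sized in longs), so
 * each side can poll its own flag without bouncing a shared cache
 * line between master and slave.
 */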
154 static unsigned long go[SLAVE + 1];
155
156 #define DEBUG_TICK_SYNC 0
157
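/* Called on the slave cpu.  Each iteration bounces a flag to the
 * master: t0/t1 bracket the exchange locally and tm is the tick the
 * master sampled in between.  The iteration with the smallest round
 * trip wins, and the return value is (midpoint of t0..t1) - tm, an
 * estimate of how far the local tick runs ahead of the master's.
 * *rt and *master report the best round trip and the master's tick
 * offset from the local t0.
 */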
158 static inline long get_delta (long *rt, long *master)
159 {
160 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
161 unsigned long tcenter, t0, t1, tm;
162 unsigned long i;
163
164 for (i = 0; i < NUM_ITERS; i++) {
165 t0 = tick_ops->get_tick();
166 go[MASTER] = 1;
167 membar_storeload();
168 while (!(tm = go[SLAVE]))
169 rmb();
170 go[SLAVE] = 0;
171 wmb();
172 t1 = tick_ops->get_tick();
173
174 if (t1 - t0 < best_t1 - best_t0)
175 best_t0 = t0, best_t1 = t1, best_tm = tm;
176 }
177
178 *rt = best_t1 - best_t0;
179 *master = best_tm - best_t0;
180
181 /* average best_t0 and best_t1 without overflow: */
182 tcenter = (best_t0/2 + best_t1/2);
183 if (best_t0 % 2 + best_t1 % 2 == 2)
184 tcenter++;
185 return tcenter - best_tm;
186 }
187
188 void smp_synchronize_tick_client(void)
189 {
190 long i, delta, adj, adjust_latency = 0, done = 0;
191 unsigned long flags, rt, master_time_stamp, bound;
192 #if DEBUG_TICK_SYNC
193 struct {
194 long rt; /* roundtrip time */
195 long master; /* master's timestamp */
196 long diff; /* difference between midpoint and master's timestamp */
197 long lat; /* estimate of itc adjustment latency */
198 } t[NUM_ROUNDS];
199 #endif
200
201 go[MASTER] = 1;
202
203 while (go[MASTER])
204 rmb();
205
206 local_irq_save(flags);
207 {
208 for (i = 0; i < NUM_ROUNDS; i++) {
209 delta = get_delta(&rt, &master_time_stamp);
210 if (delta == 0) {
211 done = 1; /* let's lock on to this... */
212 bound = rt;
213 }
214
215 if (!done) {
216 if (i > 0) {
217 adjust_latency += -delta;
218 adj = -delta + adjust_latency/4;
219 } else
220 adj = -delta;
221
222 tick_ops->add_tick(adj);
223 }
224 #if DEBUG_TICK_SYNC
225 t[i].rt = rt;
226 t[i].master = master_time_stamp;
227 t[i].diff = delta;
228 t[i].lat = adjust_latency/4;
229 #endif
230 }
231 }
232 local_irq_restore(flags);
233
234 #if DEBUG_TICK_SYNC
235 for (i = 0; i < NUM_ROUNDS; i++)
236 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
237 t[i].rt, t[i].master, t[i].diff, t[i].lat);
238 #endif
239
240 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
241 "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
242 }
243
244 static void smp_start_sync_tick_client(int cpu);
245
246 static void smp_synchronize_one_tick(int cpu)
247 {
248 unsigned long flags, i;
249
250 go[MASTER] = 0;
251
252 smp_start_sync_tick_client(cpu);
253
254 /* wait for client to be ready */
255 while (!go[MASTER])
256 rmb();
257
258 /* now let the client proceed into his loop */
259 go[MASTER] = 0;
260 membar_storeload();
261
262 spin_lock_irqsave(&itc_sync_lock, flags);
263 {
264 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
265 while (!go[MASTER])
266 rmb();
267 go[MASTER] = 0;
268 wmb();
269 go[SLAVE] = tick_ops->get_tick();
270 membar_storeload();
271 }
272 }
273 spin_unlock_irqrestore(&itc_sync_lock, flags);
274 }
275
276 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
277 /* XXX Put this in some common place. XXX */
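/* Translate a kernel-image virtual address into the real address the
 * hypervisor expects: kern_base plus the offset from KERNBASE.
 */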
278 static unsigned long kimage_addr_to_ra(void *p)
279 {
280 unsigned long val = (unsigned long) p;
281
282 return kern_base + (val - KERNBASE);
283 }
284
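/* Bring up a cpu via the LDoms hypervisor interface: build an
 * hvtramp_descr describing the kernel's locked TTE mapping(s), the
 * cpu's MMU fault status area and its initial thread register, then
 * point sun4v_cpu_start() at the hv_cpu_startup trampoline with that
 * descriptor as its argument.
 */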
285 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
286 {
287 extern unsigned long sparc64_ttable_tl0;
288 extern unsigned long kern_locked_tte_data;
289 extern int bigkernel;
290 struct hvtramp_descr *hdesc;
291 unsigned long trampoline_ra;
292 struct trap_per_cpu *tb;
293 u64 tte_vaddr, tte_data;
294 unsigned long hv_err;
295
296 hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
297 if (!hdesc) {
298 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
299 "hvtramp_descr.\n");
300 return;
301 }
302
303 hdesc->cpu = cpu;
304 hdesc->num_mappings = (bigkernel ? 2 : 1);
305
306 tb = &trap_block[cpu];
307 tb->hdesc = hdesc;
308
309 hdesc->fault_info_va = (unsigned long) &tb->fault_info;
310 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
311
312 hdesc->thread_reg = thread_reg;
313
314 tte_vaddr = (unsigned long) KERNBASE;
315 tte_data = kern_locked_tte_data;
316
317 hdesc->maps[0].vaddr = tte_vaddr;
318 hdesc->maps[0].tte = tte_data;
319 if (bigkernel) {
320 tte_vaddr += 0x400000;
321 tte_data += 0x400000;
322 hdesc->maps[1].vaddr = tte_vaddr;
323 hdesc->maps[1].tte = tte_data;
324 }
325
326 trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
327
328 hv_err = sun4v_cpu_start(cpu, trampoline_ra,
329 kimage_addr_to_ra(&sparc64_ttable_tl0),
330 __pa(hdesc));
331 if (hv_err)
332 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
333 "gives error %lu\n", hv_err);
334 }
335 #endif
336
337 extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
338
339 extern unsigned long sparc64_cpu_startup;
340
341 /* The OBP cpu startup callback truncates the 3rd arg cookie to
342 * 32-bits (I think) so to be safe we have it read the pointer
343 * contained here so we work on >4GB machines. -DaveM
344 */
345 static struct thread_info *cpu_new_thread = NULL;
346
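/* Boot one secondary: fork an idle task for it, publish its
 * thread_info through cpu_new_thread, kick the cpu via the
 * hypervisor/LDoms or OBP path as appropriate, then poll callin_flag
 * for up to roughly 5 seconds (50000 * 100us) waiting for it to call
 * in.
 */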
347 static int __devinit smp_boot_one_cpu(unsigned int cpu)
348 {
349 struct trap_per_cpu *tb = &trap_block[cpu];
350 unsigned long entry =
351 (unsigned long)(&sparc64_cpu_startup);
352 unsigned long cookie =
353 (unsigned long)(&cpu_new_thread);
354 struct task_struct *p;
355 int timeout, ret;
356
357 p = fork_idle(cpu);
358 callin_flag = 0;
359 cpu_new_thread = task_thread_info(p);
360
361 if (tlb_type == hypervisor) {
362 /* Alloc the mondo queues, cpu will load them. */
363 sun4v_init_mondo_queues(0, cpu, 1, 0);
364
365 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
366 if (ldom_domaining_enabled)
367 ldom_startcpu_cpuid(cpu,
368 (unsigned long) cpu_new_thread);
369 else
370 #endif
371 prom_startcpu_cpuid(cpu, entry, cookie);
372 } else {
373 struct device_node *dp = of_find_node_by_cpuid(cpu);
374
375 prom_startcpu(dp->node, entry, cookie);
376 }
377
378 for (timeout = 0; timeout < 50000; timeout++) {
379 if (callin_flag)
380 break;
381 udelay(100);
382 }
383
384 if (callin_flag) {
385 ret = 0;
386 } else {
387 printk("Processor %d is stuck.\n", cpu);
388 ret = -ENODEV;
389 }
390 cpu_new_thread = NULL;
391
392 if (tb->hdesc) {
393 kfree(tb->hdesc);
394 tb->hdesc = NULL;
395 }
396
397 return ret;
398 }
399
400 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
401 {
402 u64 result, target;
403 int stuck, tmp;
404
405 if (this_is_starfire) {
406 /* map to real upaid */
407 cpu = (((cpu & 0x3c) << 1) |
408 ((cpu & 0x40) >> 4) |
409 (cpu & 0x3));
410 }
411
412 target = (cpu << 14) | 0x70;
413 again:
414 /* Ok, this is the real Spitfire Errata #54.
415 * One must read back from a UDB internal register
416 * after writes to the UDB interrupt dispatch, but
417 * before the membar Sync for that write.
418 * So we use the high UDB control register (ASI 0x7f,
419 * ADDR 0x20) for the dummy read. -DaveM
420 */
421 tmp = 0x40;
422 __asm__ __volatile__(
423 "wrpr %1, %2, %%pstate\n\t"
424 "stxa %4, [%0] %3\n\t"
425 "stxa %5, [%0+%8] %3\n\t"
426 "add %0, %8, %0\n\t"
427 "stxa %6, [%0+%8] %3\n\t"
428 "membar #Sync\n\t"
429 "stxa %%g0, [%7] %3\n\t"
430 "membar #Sync\n\t"
431 "mov 0x20, %%g1\n\t"
432 "ldxa [%%g1] 0x7f, %%g0\n\t"
433 "membar #Sync"
434 : "=r" (tmp)
435 : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
436 "r" (data0), "r" (data1), "r" (data2), "r" (target),
437 "r" (0x10), "0" (tmp)
438 : "g1");
439
440 /* NOTE: PSTATE_IE is still clear. */
441 stuck = 100000;
442 do {
443 __asm__ __volatile__("ldxa [%%g0] %1, %0"
444 : "=r" (result)
445 : "i" (ASI_INTR_DISPATCH_STAT));
446 if (result == 0) {
447 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
448 : : "r" (pstate));
449 return;
450 }
451 stuck -= 1;
452 if (stuck == 0)
453 break;
454 } while (result & 0x1);
455 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
456 : : "r" (pstate));
457 if (stuck == 0) {
458 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
459 smp_processor_id(), result);
460 } else {
461 udelay(2);
462 goto again;
463 }
464 }
465
466 static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
467 {
468 u64 pstate;
469 int i;
470
471 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
472 for_each_cpu_mask(i, mask)
473 spitfire_xcall_helper(data0, data1, data2, pstate, i);
474 }
475
476 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
477 * packet, but we have no use for that. However we do take advantage of
478 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
479 */
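/* The dispatch status register is read below as a pair of bits per
 * outstanding dispatch: the 0x5555... poll mask watches the "busy"
 * half of each pair, while the 0x2UL << shift checks pick out the
 * "nack" half when deciding which cpus need to be retried.
 */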
480 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
481 {
482 u64 pstate, ver;
483 int nack_busy_id, is_jbus, need_more;
484
485 if (cpus_empty(mask))
486 return;
487
488 /* Unfortunately, someone at Sun had the brilliant idea to make the
489 * busy/nack fields hard-coded by ITID number for this Ultra-III
490 * derivative processor.
491 */
492 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
493 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
494 (ver >> 32) == __SERRANO_ID);
495
496 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
497
498 retry:
499 need_more = 0;
500 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
501 : : "r" (pstate), "i" (PSTATE_IE));
502
503 /* Setup the dispatch data registers. */
504 __asm__ __volatile__("stxa %0, [%3] %6\n\t"
505 "stxa %1, [%4] %6\n\t"
506 "stxa %2, [%5] %6\n\t"
507 "membar #Sync\n\t"
508 : /* no outputs */
509 : "r" (data0), "r" (data1), "r" (data2),
510 "r" (0x40), "r" (0x50), "r" (0x60),
511 "i" (ASI_INTR_W));
512
513 nack_busy_id = 0;
514 {
515 int i;
516
517 for_each_cpu_mask(i, mask) {
518 u64 target = (i << 14) | 0x70;
519
520 if (!is_jbus)
521 target |= (nack_busy_id << 24);
522 __asm__ __volatile__(
523 "stxa %%g0, [%0] %1\n\t"
524 "membar #Sync\n\t"
525 : /* no outputs */
526 : "r" (target), "i" (ASI_INTR_W));
527 nack_busy_id++;
528 if (nack_busy_id == 32) {
529 need_more = 1;
530 break;
531 }
532 }
533 }
534
535 /* Now, poll for completion. */
536 {
537 u64 dispatch_stat;
538 long stuck;
539
540 stuck = 100000 * nack_busy_id;
541 do {
542 __asm__ __volatile__("ldxa [%%g0] %1, %0"
543 : "=r" (dispatch_stat)
544 : "i" (ASI_INTR_DISPATCH_STAT));
545 if (dispatch_stat == 0UL) {
546 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
547 : : "r" (pstate));
548 if (unlikely(need_more)) {
549 int i, cnt = 0;
550 for_each_cpu_mask(i, mask) {
551 cpu_clear(i, mask);
552 cnt++;
553 if (cnt == 32)
554 break;
555 }
556 goto retry;
557 }
558 return;
559 }
560 if (!--stuck)
561 break;
562 } while (dispatch_stat & 0x5555555555555555UL);
563
564 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
565 : : "r" (pstate));
566
567 if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
568 /* Busy bits will not clear, continue instead
569 * of freezing up on this cpu.
570 */
571 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
572 smp_processor_id(), dispatch_stat);
573 } else {
574 int i, this_busy_nack = 0;
575
576 /* Delay some random time with interrupts enabled
577 * to prevent deadlock.
578 */
579 udelay(2 * nack_busy_id);
580
581 /* Clear out the mask bits for cpus which did not
582 * NACK us.
583 */
584 for_each_cpu_mask(i, mask) {
585 u64 check_mask;
586
587 if (is_jbus)
588 check_mask = (0x2UL << (2*i));
589 else
590 check_mask = (0x2UL <<
591 this_busy_nack);
592 if ((dispatch_stat & check_mask) == 0)
593 cpu_clear(i, mask);
594 this_busy_nack += 2;
595 if (this_busy_nack == 64)
596 break;
597 }
598
599 goto retry;
600 }
601 }
602 }
603
604 /* Multi-cpu list version. */
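/* The mondo data words and the target cpu list live in this cpu's
 * trap_block.  sun4v_cpu_mondo_send() is retried until every list
 * entry has been rewritten to 0xffff (delivered); cpus the hypervisor
 * reports as being in the error state are marked delivered, collected
 * in error_mask, and reported once the send completes.
 */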
605 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
606 {
607 struct trap_per_cpu *tb;
608 u16 *cpu_list;
609 u64 *mondo;
610 cpumask_t error_mask;
611 unsigned long flags, status;
612 int cnt, retries, this_cpu, prev_sent, i;
613
614 if (cpus_empty(mask))
615 return;
616
617 /* We have to do this whole thing with interrupts fully disabled.
618 * Otherwise if we send an xcall from interrupt context it will
619 * corrupt both our mondo block and cpu list state.
620 *
621 * One consequence of this is that we cannot use timeout mechanisms
622 * that depend upon interrupts being delivered locally. So, for
623 * example, we cannot sample jiffies and expect it to advance.
624 *
625 * Fortunately, udelay() uses %stick/%tick so we can use that.
626 */
627 local_irq_save(flags);
628
629 this_cpu = smp_processor_id();
630 tb = &trap_block[this_cpu];
631
632 mondo = __va(tb->cpu_mondo_block_pa);
633 mondo[0] = data0;
634 mondo[1] = data1;
635 mondo[2] = data2;
636 wmb();
637
638 cpu_list = __va(tb->cpu_list_pa);
639
640 /* Setup the initial cpu list. */
641 cnt = 0;
642 for_each_cpu_mask(i, mask)
643 cpu_list[cnt++] = i;
644
645 cpus_clear(error_mask);
646 retries = 0;
647 prev_sent = 0;
648 do {
649 int forward_progress, n_sent;
650
651 status = sun4v_cpu_mondo_send(cnt,
652 tb->cpu_list_pa,
653 tb->cpu_mondo_block_pa);
654
655 /* HV_EOK means all cpus received the xcall, we're done. */
656 if (likely(status == HV_EOK))
657 break;
658
659 /* First, see if we made any forward progress.
660 *
661 * The hypervisor indicates successful sends by setting
662 * cpu list entries to the value 0xffff.
663 */
664 n_sent = 0;
665 for (i = 0; i < cnt; i++) {
666 if (likely(cpu_list[i] == 0xffff))
667 n_sent++;
668 }
669
670 forward_progress = 0;
671 if (n_sent > prev_sent)
672 forward_progress = 1;
673
674 prev_sent = n_sent;
675
676 /* If we get a HV_ECPUERROR, then one or more of the cpus
677 * in the list are in error state. Use the cpu_state()
678 * hypervisor call to find out which cpus are in error state.
679 */
680 if (unlikely(status == HV_ECPUERROR)) {
681 for (i = 0; i < cnt; i++) {
682 long err;
683 u16 cpu;
684
685 cpu = cpu_list[i];
686 if (cpu == 0xffff)
687 continue;
688
689 err = sun4v_cpu_state(cpu);
690 if (err >= 0 &&
691 err == HV_CPU_STATE_ERROR) {
692 cpu_list[i] = 0xffff;
693 cpu_set(cpu, error_mask);
694 }
695 }
696 } else if (unlikely(status != HV_EWOULDBLOCK))
697 goto fatal_mondo_error;
698
699 /* Don't bother rewriting the CPU list, just leave the
700 * 0xffff and non-0xffff entries in there and the
701 * hypervisor will do the right thing.
702 *
703 * Only advance timeout state if we didn't make any
704 * forward progress.
705 */
706 if (unlikely(!forward_progress)) {
707 if (unlikely(++retries > 10000))
708 goto fatal_mondo_timeout;
709
710 /* Delay a little bit to let other cpus catch up
711 * on their cpu mondo queue work.
712 */
713 udelay(2 * cnt);
714 }
715 } while (1);
716
717 local_irq_restore(flags);
718
719 if (unlikely(!cpus_empty(error_mask)))
720 goto fatal_mondo_cpu_error;
721
722 return;
723
724 fatal_mondo_cpu_error:
725 printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
726 "were in error state\n",
727 this_cpu);
728 printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
729 for_each_cpu_mask(i, error_mask)
730 printk("%d ", i);
731 printk("]\n");
732 return;
733
734 fatal_mondo_timeout:
735 local_irq_restore(flags);
736 printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
737 " progress after %d retries.\n",
738 this_cpu, retries);
739 goto dump_cpu_list_and_out;
740
741 fatal_mondo_error:
742 local_irq_restore(flags);
743 printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
744 this_cpu, status);
745 printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
746 "mondo_block_pa(%lx)\n",
747 this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
748
749 dump_cpu_list_and_out:
750 printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
751 for (i = 0; i < cnt; i++)
752 printk("%u ", cpu_list[i]);
753 printk("]\n");
754 }
755
756 /* Send cross call to all processors mentioned in MASK
757 * except self.
758 */
759 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
760 {
761 u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
762 int this_cpu = get_cpu();
763
764 cpus_and(mask, mask, cpu_online_map);
765 cpu_clear(this_cpu, mask);
766
767 if (tlb_type == spitfire)
768 spitfire_xcall_deliver(data0, data1, data2, mask);
769 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
770 cheetah_xcall_deliver(data0, data1, data2, mask);
771 else
772 hypervisor_xcall_deliver(data0, data1, data2, mask);
773 /* NOTE: Caller runs local copy on master. */
774
775 put_cpu();
776 }
777
778 extern unsigned long xcall_sync_tick;
779
780 static void smp_start_sync_tick_client(int cpu)
781 {
782 cpumask_t mask = cpumask_of_cpu(cpu);
783
784 smp_cross_call_masked(&xcall_sync_tick,
785 0, 0, 0, mask);
786 }
787
788 /* Send cross call to all processors except self. */
789 #define smp_cross_call(func, ctx, data1, data2) \
790 smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
791
792 struct call_data_struct {
793 void (*func) (void *info);
794 void *info;
795 atomic_t finished;
796 int wait;
797 };
798
799 static struct call_data_struct *call_data;
800
801 extern unsigned long xcall_call_function;
802
803 /**
804 * smp_call_function(): Run a function on all other CPUs.
805 * @func: The function to run. This must be fast and non-blocking.
806 * @info: An arbitrary pointer to pass to the function.
807 * @nonatomic: currently unused.
808 * @wait: If true, wait (atomically) until function has completed on other CPUs.
809 *
810 * Returns 0 on success, else a negative status code. Does not return until
811 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
812 *
813 * You must not call this function with disabled interrupts or from a
814 * hardware interrupt handler or from a bottom half handler.
815 */
816 static int smp_call_function_mask(void (*func)(void *info), void *info,
817 int nonatomic, int wait, cpumask_t mask)
818 {
819 struct call_data_struct data;
820 int cpus;
821
822 /* Can deadlock when called with interrupts disabled */
823 WARN_ON(irqs_disabled());
824
825 data.func = func;
826 data.info = info;
827 atomic_set(&data.finished, 0);
828 data.wait = wait;
829
830 spin_lock(&call_lock);
831
832 cpu_clear(smp_processor_id(), mask);
833 cpus = cpus_weight(mask);
834 if (!cpus)
835 goto out_unlock;
836
837 call_data = &data;
838 mb();
839
840 smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
841
842 /* Wait for response */
843 while (atomic_read(&data.finished) != cpus)
844 cpu_relax();
845
846 out_unlock:
847 spin_unlock(&call_lock);
848
849 return 0;
850 }
851
852 int smp_call_function(void (*func)(void *info), void *info,
853 int nonatomic, int wait)
854 {
855 return smp_call_function_mask(func, info, nonatomic, wait,
856 cpu_online_map);
857 }
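/* Illustrative use (hypothetical callback): running a short,
 * non-sleeping function on every other online cpu and waiting for all
 * of them to finish would look like
 *
 *	static void drain_foo(void *unused) { ... }
 *	...
 *	smp_call_function(drain_foo, NULL, 0, 1);
 *
 * The callback runs from the cross-call interrupt on the remote cpus,
 * so it must not block.
 */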
858
859 void smp_call_function_client(int irq, struct pt_regs *regs)
860 {
861 void (*func) (void *info) = call_data->func;
862 void *info = call_data->info;
863
864 clear_softint(1 << irq);
865 if (call_data->wait) {
866 /* let initiator proceed only after completion */
867 func(info);
868 atomic_inc(&call_data->finished);
869 } else {
870 /* let initiator proceed after getting data */
871 atomic_inc(&call_data->finished);
872 func(info);
873 }
874 }
875
876 static void tsb_sync(void *info)
877 {
878 struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
879 struct mm_struct *mm = info;
880
881 	/* It is not valid to test "current->active_mm == mm" here.
882 *
883 * The value of "current" is not changed atomically with
884 * switch_mm(). But that's OK, we just need to check the
885 * current cpu's trap block PGD physical address.
886 */
887 if (tp->pgd_paddr == __pa(mm->pgd))
888 tsb_context_switch(mm);
889 }
890
891 void smp_tsb_sync(struct mm_struct *mm)
892 {
893 smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
894 }
895
896 extern unsigned long xcall_flush_tlb_mm;
897 extern unsigned long xcall_flush_tlb_pending;
898 extern unsigned long xcall_flush_tlb_kernel_range;
899 extern unsigned long xcall_report_regs;
900 extern unsigned long xcall_receive_signal;
901 extern unsigned long xcall_new_mmu_context_version;
902
903 #ifdef DCACHE_ALIASING_POSSIBLE
904 extern unsigned long xcall_flush_dcache_page_cheetah;
905 #endif
906 extern unsigned long xcall_flush_dcache_page_spitfire;
907
908 #ifdef CONFIG_DEBUG_DCFLUSH
909 extern atomic_t dcpage_flushes;
910 extern atomic_t dcpage_flushes_xcall;
911 #endif
912
913 static __inline__ void __local_flush_dcache_page(struct page *page)
914 {
915 #ifdef DCACHE_ALIASING_POSSIBLE
916 __flush_dcache_page(page_address(page),
917 ((tlb_type == spitfire) &&
918 page_mapping(page) != NULL));
919 #else
920 if (page_mapping(page) != NULL &&
921 tlb_type == spitfire)
922 __flush_icache_page(__pa(page_address(page)));
923 #endif
924 }
925
926 void smp_flush_dcache_page_impl(struct page *page, int cpu)
927 {
928 cpumask_t mask = cpumask_of_cpu(cpu);
929 int this_cpu;
930
931 if (tlb_type == hypervisor)
932 return;
933
934 #ifdef CONFIG_DEBUG_DCFLUSH
935 atomic_inc(&dcpage_flushes);
936 #endif
937
938 this_cpu = get_cpu();
939
940 if (cpu == this_cpu) {
941 __local_flush_dcache_page(page);
942 } else if (cpu_online(cpu)) {
943 void *pg_addr = page_address(page);
944 u64 data0;
945
946 if (tlb_type == spitfire) {
947 data0 =
948 ((u64)&xcall_flush_dcache_page_spitfire);
949 if (page_mapping(page) != NULL)
950 data0 |= ((u64)1 << 32);
951 spitfire_xcall_deliver(data0,
952 __pa(pg_addr),
953 (u64) pg_addr,
954 mask);
955 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
956 #ifdef DCACHE_ALIASING_POSSIBLE
957 data0 =
958 ((u64)&xcall_flush_dcache_page_cheetah);
959 cheetah_xcall_deliver(data0,
960 __pa(pg_addr),
961 0, mask);
962 #endif
963 }
964 #ifdef CONFIG_DEBUG_DCFLUSH
965 atomic_inc(&dcpage_flushes_xcall);
966 #endif
967 }
968
969 put_cpu();
970 }
971
972 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
973 {
974 void *pg_addr = page_address(page);
975 cpumask_t mask = cpu_online_map;
976 u64 data0;
977 int this_cpu;
978
979 if (tlb_type == hypervisor)
980 return;
981
982 this_cpu = get_cpu();
983
984 cpu_clear(this_cpu, mask);
985
986 #ifdef CONFIG_DEBUG_DCFLUSH
987 atomic_inc(&dcpage_flushes);
988 #endif
989 if (cpus_empty(mask))
990 goto flush_self;
991 if (tlb_type == spitfire) {
992 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
993 if (page_mapping(page) != NULL)
994 data0 |= ((u64)1 << 32);
995 spitfire_xcall_deliver(data0,
996 __pa(pg_addr),
997 (u64) pg_addr,
998 mask);
999 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1000 #ifdef DCACHE_ALIASING_POSSIBLE
1001 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
1002 cheetah_xcall_deliver(data0,
1003 __pa(pg_addr),
1004 0, mask);
1005 #endif
1006 }
1007 #ifdef CONFIG_DEBUG_DCFLUSH
1008 atomic_inc(&dcpage_flushes_xcall);
1009 #endif
1010 flush_self:
1011 __local_flush_dcache_page(page);
1012
1013 put_cpu();
1014 }
1015
1016 static void __smp_receive_signal_mask(cpumask_t mask)
1017 {
1018 smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
1019 }
1020
1021 void smp_receive_signal(int cpu)
1022 {
1023 cpumask_t mask = cpumask_of_cpu(cpu);
1024
1025 if (cpu_online(cpu))
1026 __smp_receive_signal_mask(mask);
1027 }
1028
1029 void smp_receive_signal_client(int irq, struct pt_regs *regs)
1030 {
1031 clear_softint(1 << irq);
1032 }
1033
1034 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
1035 {
1036 struct mm_struct *mm;
1037 unsigned long flags;
1038
1039 clear_softint(1 << irq);
1040
1041 /* See if we need to allocate a new TLB context because
1042 * the version of the one we are using is now out of date.
1043 */
1044 mm = current->active_mm;
1045 if (unlikely(!mm || (mm == &init_mm)))
1046 return;
1047
1048 spin_lock_irqsave(&mm->context.lock, flags);
1049
1050 if (unlikely(!CTX_VALID(mm->context)))
1051 get_new_mmu_context(mm);
1052
1053 spin_unlock_irqrestore(&mm->context.lock, flags);
1054
1055 load_secondary_context(mm);
1056 __flush_tlb_mm(CTX_HWBITS(mm->context),
1057 SECONDARY_CONTEXT);
1058 }
1059
1060 void smp_new_mmu_context_version(void)
1061 {
1062 smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
1063 }
1064
1065 void smp_report_regs(void)
1066 {
1067 smp_cross_call(&xcall_report_regs, 0, 0, 0);
1068 }
1069
1070 /* We know that the window frames of the user have been flushed
1071 * to the stack before we get here because all callers of us
1072 * are flush_tlb_*() routines, and these run after flush_cache_*()
1073 * which performs the flushw.
1074 *
1075 * The SMP TLB coherency scheme we use works as follows:
1076 *
1077 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1078  *    space has (potentially) executed on; this is the heuristic
1079 * we use to avoid doing cross calls.
1080 *
1081 * Also, for flushing from kswapd and also for clones, we
1082 * use cpu_vm_mask as the list of cpus to make run the TLB.
1083 *
1084 * 2) TLB context numbers are shared globally across all processors
1085 * in the system, this allows us to play several games to avoid
1086 * cross calls.
1087 *
1088 * One invariant is that when a cpu switches to a process, and
1089  *    that process's tsk->active_mm->cpu_vm_mask does not have the
1090 * current cpu's bit set, that tlb context is flushed locally.
1091 *
1092 * If the address space is non-shared (ie. mm->count == 1) we avoid
1093 * cross calls when we want to flush the currently running process's
1094 * tlb state. This is done by clearing all cpu bits except the current
1095 * processor's in current->active_mm->cpu_vm_mask and performing the
1096 * flush locally only. This will force any subsequent cpus which run
1097 * this task to flush the context from the local tlb if the process
1098 * migrates to another cpu (again).
1099 *
1100 * 3) For shared address spaces (threads) and swapping we bite the
1101 * bullet for most cases and perform the cross call (but only to
1102 * the cpus listed in cpu_vm_mask).
1103 *
1104 * The performance gain from "optimizing" away the cross call for threads is
1105 * questionable (in theory the big win for threads is the massive sharing of
1106 * address space state across processors).
1107 */
1108
1109 /* This currently is only used by the hugetlb arch pre-fault
1110 * hook on UltraSPARC-III+ and later when changing the pagesize
1111 * bits of the context register for an address space.
1112 */
1113 void smp_flush_tlb_mm(struct mm_struct *mm)
1114 {
1115 u32 ctx = CTX_HWBITS(mm->context);
1116 int cpu = get_cpu();
1117
1118 if (atomic_read(&mm->mm_users) == 1) {
1119 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1120 goto local_flush_and_out;
1121 }
1122
1123 smp_cross_call_masked(&xcall_flush_tlb_mm,
1124 ctx, 0, 0,
1125 mm->cpu_vm_mask);
1126
1127 local_flush_and_out:
1128 __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1129
1130 put_cpu();
1131 }
1132
1133 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1134 {
1135 u32 ctx = CTX_HWBITS(mm->context);
1136 int cpu = get_cpu();
1137
1138 if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
1139 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1140 else
1141 smp_cross_call_masked(&xcall_flush_tlb_pending,
1142 ctx, nr, (unsigned long) vaddrs,
1143 mm->cpu_vm_mask);
1144
1145 __flush_tlb_pending(ctx, nr, vaddrs);
1146
1147 put_cpu();
1148 }
1149
1150 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1151 {
1152 start &= PAGE_MASK;
1153 end = PAGE_ALIGN(end);
1154 if (start != end) {
1155 smp_cross_call(&xcall_flush_tlb_kernel_range,
1156 0, start, end);
1157
1158 __flush_tlb_kernel_range(start, end);
1159 }
1160 }
1161
1162 /* CPU capture. */
1163 /* #define CAPTURE_DEBUG */
1164 extern unsigned long xcall_capture;
1165
1166 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1167 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1168 static unsigned long penguins_are_doing_time;
1169
1170 void smp_capture(void)
1171 {
1172 int result = atomic_add_ret(1, &smp_capture_depth);
1173
1174 if (result == 1) {
1175 int ncpus = num_online_cpus();
1176
1177 #ifdef CAPTURE_DEBUG
1178 printk("CPU[%d]: Sending penguins to jail...",
1179 smp_processor_id());
1180 #endif
1181 penguins_are_doing_time = 1;
1182 membar_storestore_loadstore();
1183 atomic_inc(&smp_capture_registry);
1184 smp_cross_call(&xcall_capture, 0, 0, 0);
1185 while (atomic_read(&smp_capture_registry) != ncpus)
1186 rmb();
1187 #ifdef CAPTURE_DEBUG
1188 printk("done\n");
1189 #endif
1190 }
1191 }
1192
1193 void smp_release(void)
1194 {
1195 if (atomic_dec_and_test(&smp_capture_depth)) {
1196 #ifdef CAPTURE_DEBUG
1197 printk("CPU[%d]: Giving pardon to "
1198 "imprisoned penguins\n",
1199 smp_processor_id());
1200 #endif
1201 penguins_are_doing_time = 0;
1202 membar_storeload_storestore();
1203 atomic_dec(&smp_capture_registry);
1204 }
1205 }
1206
1207 /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
1208 * can service tlb flush xcalls...
1209 */
1210 extern void prom_world(int);
1211
1212 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1213 {
1214 clear_softint(1 << irq);
1215
1216 preempt_disable();
1217
1218 __asm__ __volatile__("flushw");
1219 prom_world(1);
1220 atomic_inc(&smp_capture_registry);
1221 membar_storeload_storestore();
1222 while (penguins_are_doing_time)
1223 rmb();
1224 atomic_dec(&smp_capture_registry);
1225 prom_world(0);
1226
1227 preempt_enable();
1228 }
1229
1230 /* /proc/profile writes can call this, don't __init it please. */
1231 int setup_profiling_timer(unsigned int multiplier)
1232 {
1233 return -EINVAL;
1234 }
1235
1236 void __init smp_prepare_cpus(unsigned int max_cpus)
1237 {
1238 }
1239
1240 void __devinit smp_prepare_boot_cpu(void)
1241 {
1242 }
1243
1244 void __devinit smp_fill_in_sib_core_maps(void)
1245 {
1246 unsigned int i;
1247
1248 for_each_present_cpu(i) {
1249 unsigned int j;
1250
1251 cpus_clear(cpu_core_map[i]);
1252 if (cpu_data(i).core_id == 0) {
1253 cpu_set(i, cpu_core_map[i]);
1254 continue;
1255 }
1256
1257 for_each_present_cpu(j) {
1258 if (cpu_data(i).core_id ==
1259 cpu_data(j).core_id)
1260 cpu_set(j, cpu_core_map[i]);
1261 }
1262 }
1263
1264 for_each_present_cpu(i) {
1265 unsigned int j;
1266
1267 cpus_clear(cpu_sibling_map[i]);
1268 if (cpu_data(i).proc_id == -1) {
1269 cpu_set(i, cpu_sibling_map[i]);
1270 continue;
1271 }
1272
1273 for_each_present_cpu(j) {
1274 if (cpu_data(i).proc_id ==
1275 cpu_data(j).proc_id)
1276 cpu_set(j, cpu_sibling_map[i]);
1277 }
1278 }
1279 }
1280
1281 int __cpuinit __cpu_up(unsigned int cpu)
1282 {
1283 int ret = smp_boot_one_cpu(cpu);
1284
1285 if (!ret) {
1286 cpu_set(cpu, smp_commenced_mask);
1287 while (!cpu_isset(cpu, cpu_online_map))
1288 mb();
1289 if (!cpu_isset(cpu, cpu_online_map)) {
1290 ret = -ENODEV;
1291 } else {
1292 /* On SUN4V, writes to %tick and %stick are
1293 * not allowed.
1294 */
1295 if (tlb_type != hypervisor)
1296 smp_synchronize_one_tick(cpu);
1297 }
1298 }
1299 return ret;
1300 }
1301
1302 #ifdef CONFIG_HOTPLUG_CPU
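/* CPU hot-unplug (dr-cpu unconfigure) path: __cpu_disable() runs on
 * the dying cpu and detaches it from the core/sibling/online maps and
 * from irq routing; its idle loop then calls cpu_play_dead(), which
 * unconfigures the sun4v mondo queues, drops the cpu from
 * smp_commenced_mask and spins with interrupts disabled.  __cpu_die(),
 * running on another cpu, waits for that mask bit to clear and, under
 * LDoms, finally asks the hypervisor to stop the cpu.
 */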
1303 void cpu_play_dead(void)
1304 {
1305 int cpu = smp_processor_id();
1306 unsigned long pstate;
1307
1308 idle_task_exit();
1309
1310 if (tlb_type == hypervisor) {
1311 struct trap_per_cpu *tb = &trap_block[cpu];
1312
1313 sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1314 tb->cpu_mondo_pa, 0);
1315 sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1316 tb->dev_mondo_pa, 0);
1317 sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1318 tb->resum_mondo_pa, 0);
1319 sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1320 tb->nonresum_mondo_pa, 0);
1321 }
1322
1323 cpu_clear(cpu, smp_commenced_mask);
1324 membar_safe("#Sync");
1325
1326 local_irq_disable();
1327
1328 __asm__ __volatile__(
1329 "rdpr %%pstate, %0\n\t"
1330 "wrpr %0, %1, %%pstate"
1331 : "=r" (pstate)
1332 : "i" (PSTATE_IE));
1333
1334 while (1)
1335 barrier();
1336 }
1337
1338 int __cpu_disable(void)
1339 {
1340 int cpu = smp_processor_id();
1341 cpuinfo_sparc *c;
1342 int i;
1343
1344 for_each_cpu_mask(i, cpu_core_map[cpu])
1345 cpu_clear(cpu, cpu_core_map[i]);
1346 cpus_clear(cpu_core_map[cpu]);
1347
1348 for_each_cpu_mask(i, cpu_sibling_map[cpu])
1349 cpu_clear(cpu, cpu_sibling_map[i]);
1350 cpus_clear(cpu_sibling_map[cpu]);
1351
1352 c = &cpu_data(cpu);
1353
1354 c->core_id = 0;
1355 c->proc_id = -1;
1356
1357 spin_lock(&call_lock);
1358 cpu_clear(cpu, cpu_online_map);
1359 spin_unlock(&call_lock);
1360
1361 smp_wmb();
1362
1363 /* Make sure no interrupts point to this cpu. */
1364 fixup_irqs();
1365
1366 local_irq_enable();
1367 mdelay(1);
1368 local_irq_disable();
1369
1370 return 0;
1371 }
1372
1373 void __cpu_die(unsigned int cpu)
1374 {
1375 int i;
1376
1377 for (i = 0; i < 100; i++) {
1378 smp_rmb();
1379 if (!cpu_isset(cpu, smp_commenced_mask))
1380 break;
1381 msleep(100);
1382 }
1383 if (cpu_isset(cpu, smp_commenced_mask)) {
1384 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1385 } else {
1386 #if defined(CONFIG_SUN_LDOMS)
1387 unsigned long hv_err;
1388 int limit = 100;
1389
1390 do {
1391 hv_err = sun4v_cpu_stop(cpu);
1392 if (hv_err == HV_EOK) {
1393 cpu_clear(cpu, cpu_present_map);
1394 break;
1395 }
1396 } while (--limit > 0);
1397 if (limit <= 0) {
1398 printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1399 hv_err);
1400 }
1401 #endif
1402 }
1403 }
1404 #endif
1405
1406 void __init smp_cpus_done(unsigned int max_cpus)
1407 {
1408 }
1409
1410 void smp_send_reschedule(int cpu)
1411 {
1412 smp_receive_signal(cpu);
1413 }
1414
1415 /* This is a nop because we capture all other cpus
1416 * anyways when making the PROM active.
1417 */
1418 void smp_send_stop(void)
1419 {
1420 }
1421
1422 unsigned long __per_cpu_base __read_mostly;
1423 unsigned long __per_cpu_shift __read_mostly;
1424
1425 EXPORT_SYMBOL(__per_cpu_base);
1426 EXPORT_SYMBOL(__per_cpu_shift);
1427
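/* Each cpu gets a page-aligned, power-of-two sized copy of the
 * .data.percpu section.  A cpu's per-cpu offset is then simply
 * cpu << __per_cpu_shift from __per_cpu_base (see __per_cpu_offset()),
 * which is what __local_per_cpu_offset caches in %g5 on each cpu.
 */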
1428 void __init real_setup_per_cpu_areas(void)
1429 {
1430 unsigned long goal, size, i;
1431 char *ptr;
1432
1433 /* Copy section for each CPU (we discard the original) */
1434 goal = PERCPU_ENOUGH_ROOM;
1435
1436 __per_cpu_shift = PAGE_SHIFT;
1437 for (size = PAGE_SIZE; size < goal; size <<= 1UL)
1438 __per_cpu_shift++;
1439
1440 ptr = alloc_bootmem_pages(size * NR_CPUS);
1441
1442 __per_cpu_base = ptr - __per_cpu_start;
1443
1444 for (i = 0; i < NR_CPUS; i++, ptr += size)
1445 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1446
1447 /* Setup %g5 for the boot cpu. */
1448 __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1449 }