x86/xen: Provide a "Xen PV" APIC driver to support >255 VCPUs
arch/x86/xen/smp.c
/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops. SMP under Xen is
 * very straightforward. Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of. As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"

cpumask_var_t xen_cpu_initialized_map;

struct xen_common_irq {
        int irq;
        char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

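/*
 * Early bring-up of a secondary VCPU: do the generic per-cpu
 * initialisation, enable the sysenter/syscall pvops where the hypervisor
 * allows it, record the CPU data and sibling map, set up the per-cpu
 * clockevents and finally mark the CPU online.
 */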
static void cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
        if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
                xen_enable_sysenter();
                xen_enable_syscall();
        }
        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        this_cpu_write(cpu_state, CPU_ONLINE);

        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb(); /* make sure everything is out */
}

/*
 * Note: the cpu parameter is only relevant for PVH. The reason for passing
 * it is that we can't call smp_processor_id() until the per-cpu segments
 * are loaded, and for that we need the cpu number. So it is passed in %rdi
 * as the first parameter.
 */
asmlinkage __visible void cpu_bringup_and_idle(int cpu)
{
#ifdef CONFIG_XEN_PVH
        if (xen_feature(XENFEAT_auto_translated_physmap) &&
            xen_feature(XENFEAT_supervisor_mode_kernel))
                xen_pvh_secondary_vcpu_init(cpu);
#endif
        cpu_bringup();
        cpu_startup_entry(CPUHP_ONLINE);
}

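/*
 * Tear down the per-cpu IPI and VIRQ bindings set up by
 * xen_smp_intr_init(). This is called on CPU offline and on the error
 * path of xen_smp_intr_init(), so each irq is checked for validity
 * (>= 0) before it is unbound and its name freed.
 */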
static void xen_smp_intr_free(unsigned int cpu)
{
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
                kfree(per_cpu(xen_resched_irq, cpu).name);
                per_cpu(xen_resched_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
                per_cpu(xen_callfunc_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfunc_irq, cpu).name);
                per_cpu(xen_callfunc_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
                per_cpu(xen_debug_irq, cpu).irq = -1;
                kfree(per_cpu(xen_debug_irq, cpu).name);
                per_cpu(xen_debug_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
                per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        }
        if (xen_hvm_domain())
                return;

        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
                per_cpu(xen_irq_work, cpu).name = NULL;
        }
}
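
/*
 * Bind the per-cpu IPI event channels (reschedule, call-function,
 * call-function-single), the VIRQ_DEBUG virq and, for PV domains, the
 * irq-work IPI for this cpu, remembering the resulting irq numbers and
 * names so that xen_smp_intr_free() can release them again.
 */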
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu).irq = rc;
        per_cpu(xen_resched_irq, cpu).name = resched_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu).irq = rc;
        per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu).irq = rc;
        per_cpu(xen_debug_irq, cpu).name = debug_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

        /*
         * The IRQ worker on PVHVM goes through the native path and uses the
         * IPI mechanism.
         */
        if (xen_hvm_domain())
                return 0;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu).irq = rc;
        per_cpu(xen_irq_work, cpu).name = callfunc_name;

        return 0;

 fail:
        xen_smp_intr_free(cpu);
        return rc;
}

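/*
 * For an unprivileged (domU) PV guest the hypervisor is the only
 * authority on how many VCPUs exist, so probe it directly: every VCPU
 * for which VCPUOP_is_up succeeds exists and is marked as possible.
 */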
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

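/*
 * For dom0 the boot-time CPU maps can list more CPUs than the hypervisor
 * actually provides (e.g. with 'dom0_max_vcpus='), so re-probe each VCPU
 * with VCPUOP_is_up and trim the possible/present maps accordingly.
 */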
static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /*
         * This is akin to using 'nr_cpus' on the Linux command line.
         * With 'dom0_max_vcpus=X' we can only have up to X VCPUs, while
         * nr_cpu_ids may be greater than X. Normally that is not a
         * problem, except when CPU hotplugging is involved: the guest
         * could then try to bring up more than X CPUs, which cannot work
         * because there is no hypercall to expand the maximum number of
         * VCPUs of an already running guest. So cap nr_cpu_ids at X.
         */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        if (xen_pv_domain()) {
                if (!xen_feature(XENFEAT_writable_page_tables))
                        /* We've switched to the "real" per-cpu gdt, so make
                         * sure the old memory can be recycled. */
                        make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
                /*
                 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
                 * expects __USER_DS
                 */
                loadsegment(ds, __USER_DS);
                loadsegment(es, __USER_DS);
#endif

                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
        /*
         * The alternative logic (which patches the unlock/lock) runs before
         * the SMP bootup code is activated. Hence we need to set this up
         * before the core kernel is patched; otherwise only modules would
         * get patched, not the core kernel.
         */
        xen_init_spinlocks();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}

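/*
 * Build the initial vcpu_guest_context for a secondary VCPU: entry point,
 * segment registers, GDT frames, kernel stack, event and failsafe
 * callbacks and cr3, then register it with the hypervisor via
 * VCPUOP_initialise. On auto-translated (PVH) guests only the entry
 * point, its arguments and cr3 are filled in.
 */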
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        /* used to tell cpu_init() that it can proceed with initialization */
        cpumask_set_cpu(cpu, cpu_callout_mask);
        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

#ifdef CONFIG_X86_32
        /* Note: PVH is not yet supported on x86_32. */
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
                ctxt->flags = VGCF_IN_KERNEL;
                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
                ctxt->user_regs.ds = __USER_DS;
                ctxt->user_regs.es = __USER_DS;
                ctxt->user_regs.ss = __KERNEL_DS;

                xen_copy_trap_info(ctxt->trap_ctxt);

                ctxt->ldt_ents = 0;

                BUG_ON((unsigned long)gdt & ~PAGE_MASK);

                gdt_mfn = arbitrary_virt_to_mfn(gdt);
                make_lowmem_page_readonly(gdt);
                make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

                ctxt->gdt_frames[0] = gdt_mfn;
                ctxt->gdt_ents = GDT_ENTRIES;

                ctxt->kernel_ss = __KERNEL_DS;
                ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
                ctxt->event_callback_cs = __KERNEL_CS;
                ctxt->failsafe_callback_cs = __KERNEL_CS;
#else
                ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
                ctxt->event_callback_eip =
                        (unsigned long)xen_hypervisor_callback;
                ctxt->failsafe_callback_eip =
                        (unsigned long)xen_failsafe_callback;
                ctxt->user_regs.cs = __KERNEL_CS;
                per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        }
#ifdef CONFIG_XEN_PVH
        else {
                /*
                 * The vcpu comes on kernel page tables which have the NX pte
                 * bit set. This means before DS/SS is touched, NX in
                 * EFER must be set. Hence the following assembly glue code.
                 */
                ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init;
                ctxt->user_regs.rdi = cpu;
                ctxt->user_regs.rsi = true;  /* entry == true */
        }
#endif
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

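/*
 * PV flavour of cpu_up: set the idle task and kernel stack for the new
 * CPU, set up its runstate area, timer and spinlock kicker, register its
 * context with the hypervisor, unpause it with VCPUOP_up and then spin
 * until cpu_bringup() reports it online.
 */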
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
#endif
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;

        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                /* Just in case we booted with a single CPU. */
                alternatives_enable_smp();

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

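/*
 * Wait (on PV) until the hypervisor reports the dying VCPU as down, do
 * the common die handling, then release the CPU's interrupt bindings,
 * spinlock kicker and timer.
 */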
static void xen_cpu_die(unsigned int cpu)
{
        while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/10);
        }

        cpu_die_common(cpu);

        xen_smp_intr_free(cpu);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and that we return from) expects. The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
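
/*
 * Executed on every other CPU via smp_call_function() from
 * xen_stop_other_cpus(): switch to the init page tables so nothing else
 * stays pinned, mark the CPU offline and ask the hypervisor to take the
 * VCPU down.
 */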
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

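/*
 * Translate a native x86 IPI vector number into the corresponding Xen
 * IPI event identifier, or -1 (with an error message) if the vector has
 * no Xen equivalent.
 */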
static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
#ifdef CONFIG_X86_64
        case NMI_VECTOR:
        case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
                xen_vector = XEN_NMI_VECTOR;
                break;
#endif
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                        vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
}

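/*
 * On PVHVM the native SMP code does the actual CPU bring-up; Xen only
 * layers its event-channel based IPIs and the spinlock kicker on top,
 * and that only works when the hypervisor provides the vector callback
 * mechanism (see xen_hvm_smp_init() below).
 */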
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}

static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;
        /*
         * xen_smp_intr_init() needs to run before native_cpu_up()
         * so that IPI vectors are set up on the booting CPU before
         * it is marked online in native_cpu_up().
         */
        rc = xen_smp_intr_init(cpu);
        WARN_ON(rc);
        if (!rc)
                rc = native_cpu_up(cpu, tidle);

        /*
         * We must initialize the slowpath CPU kicker _after_ the native
         * path has executed. If we initialized it before, none of the
         * unlocker IPI kicks would reach the booting CPU, as the booting
         * CPU had not yet set itself 'online' in cpu_online_mask. That
         * mask is checked when IPIs are sent (on HVM at least).
         */
        xen_init_lock_cpu(cpu);
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        xen_cpu_die(cpu);
        native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}