/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

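/*
 * Per-CPU book-keeping for each IPI/VIRQ binding: the Linux irq number
 * returned by the bind call and the kasprintf()'d name handed to it, so
 * that xen_smp_intr_free() can unbind the handler and free the name
 * again.  An irq of -1 means "not currently bound".
 */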
struct xen_common_irq {
        int irq;
        char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

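/*
 * Per-CPU bring-up work, run on the new vCPU itself once Xen starts it
 * at cpu_bringup_and_idle(): initialise the CPU, publish its cpu_data,
 * mark it online and enable interrupts.
 */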
static void cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        this_cpu_write(cpu_state, CPU_ONLINE);

        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}

static void cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_startup_entry(CPUHP_ONLINE);
}

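/*
 * Tear down the per-CPU IPI/VIRQ bindings set up by xen_smp_intr_init(),
 * unbinding each handler and freeing its name.  Also used as the error
 * path when xen_smp_intr_init() fails part way through.
 */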
static void xen_smp_intr_free(unsigned int cpu)
{
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
                kfree(per_cpu(xen_resched_irq, cpu).name);
                per_cpu(xen_resched_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
                per_cpu(xen_callfunc_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfunc_irq, cpu).name);
                per_cpu(xen_callfunc_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
                per_cpu(xen_debug_irq, cpu).irq = -1;
                kfree(per_cpu(xen_debug_irq, cpu).name);
                per_cpu(xen_debug_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
                per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        }
        if (xen_hvm_domain())
                return;

        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
                per_cpu(xen_irq_work, cpu).name = NULL;
        }
}

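/*
 * Bind the event channels that carry this CPU's IPIs (reschedule,
 * call-function, call-function-single and, for PV, irq_work) plus the
 * VIRQ_DEBUG virq, recording each irq number and name for later teardown.
 */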
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu).irq = rc;
        per_cpu(xen_resched_irq, cpu).name = resched_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu).irq = rc;
        per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu).irq = rc;
        per_cpu(xen_debug_irq, cpu).name = debug_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

        /*
         * The IRQ worker on PVHVM goes through the native path and uses the
         * IPI mechanism.
         */
        if (xen_hvm_domain())
                return 0;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu).irq = rc;
        per_cpu(xen_irq_work, cpu).name = callfunc_name;

        return 0;

 fail:
        xen_smp_intr_free(cpu);
        return rc;
}

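/*
 * domU only: probe each possible vCPU id with VCPUOP_is_up (which fails
 * for ids the domain does not have) and mark the existing ones possible.
 */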
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

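/*
 * dom0 only: recount the vCPUs the hypervisor actually provides and drop
 * any CPU ids without a backing vCPU from the possible/present maps
 * (e.g. when booted with dom0_max_vcpus=X).
 */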
static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /*
         * This is akin to using 'nr_cpus' on the Linux command line.
         * That is fine: with 'dom0_max_vcpus=X' we can only have up to
         * X VCPUs, while nr_cpu_ids may be greater than X.  Normally
         * this is not a problem, except when CPU hotplugging is
         * involved: then there might be more than X CPUs in the guest,
         * which will not work as there is no hypercall to expand the
         * maximum number of VCPUs an already running guest has.  So cap
         * it to X.
         */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        if (xen_pv_domain()) {
                /* We've switched to the "real" per-cpu gdt, so make sure the
                   old memory can be recycled */
                make_lowmem_page_readwrite(xen_initial_gdt);

                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
        /*
         * The alternatives logic (which patches the unlock/lock paths)
         * runs before the SMP bootup code is activated.  Hence we need
         * to set this up before the core kernel is patched; otherwise
         * only modules would get patched, not the core code.
         */
        xen_init_spinlocks();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}

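/*
 * Build the initial vcpu_guest_context for a secondary vCPU (entry point
 * cpu_bringup_and_idle, kernel stack, segment selectors, trap table, GDT
 * frame and cr3) and register it with Xen via VCPUOP_initialise.
 */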
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        {
                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
                ctxt->user_regs.ds = __USER_DS;
                ctxt->user_regs.es = __USER_DS;

                xen_copy_trap_info(ctxt->trap_ctxt);

                ctxt->ldt_ents = 0;

                BUG_ON((unsigned long)gdt & ~PAGE_MASK);

                gdt_mfn = arbitrary_virt_to_mfn(gdt);
                make_lowmem_page_readonly(gdt);
                make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

                ctxt->gdt_frames[0] = gdt_mfn;
                ctxt->gdt_ents = GDT_ENTRIES;

                ctxt->kernel_ss = __KERNEL_DS;
                ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
                ctxt->event_callback_cs = __KERNEL_CS;
                ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif
                ctxt->event_callback_eip =
                        (unsigned long)xen_hypervisor_callback;
                ctxt->failsafe_callback_eip =
                        (unsigned long)xen_failsafe_callback;
        }
        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

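/*
 * PV cpu_up: prepare the idle task and per-CPU state, set up the timer,
 * runstate and spinlock plumbing, initialise the vCPU context with Xen,
 * then kick it with VCPUOP_up and wait for it to report CPU_ONLINE.
 */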
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                /* Just in case we booted with a single CPU. */
                alternatives_enable_smp();

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
        while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        xen_smp_intr_free(cpu);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and that we return from) expects. The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif

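/* Executed on each CPU by xen_stop_other_cpus(): take this vCPU down. */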
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

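/* Send an already Xen-numbered IPI vector to every online CPU in @mask. */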
static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

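/*
 * Translate a native x86 IPI vector number into the corresponding Xen
 * IPI event (XEN_*_VECTOR), or -1 if there is no mapping.
 */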
static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
#ifdef CONFIG_X86_64
        case NMI_VECTOR:
        case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
                xen_vector = XEN_NMI_VECTOR;
                break;
#endif
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                       vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
}

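/*
 * PVHVM variants: CPUs come up through the native SMP path, with the
 * Xen per-CPU event-channel IPIs and the spinlock kicker layered on top.
 */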
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}

static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;
        /*
         * xen_smp_intr_init() needs to run before native_cpu_up()
         * so that IPI vectors are set up on the booting CPU before
         * it is marked online in native_cpu_up().
         */
        rc = xen_smp_intr_init(cpu);
        WARN_ON(rc);
        if (!rc)
                rc = native_cpu_up(cpu, tidle);

        /*
         * We must initialize the slowpath CPU kicker _after_ the native
         * path has executed.  If we initialized it earlier, none of the
         * unlocker IPI kicks would reach the booting CPU, as the booting
         * CPU had not yet set itself 'online' in cpu_online_mask.  That
         * mask is checked when IPIs are sent (on HVM at least).
         */
        xen_init_lock_cpu(cpu);
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        xen_cpu_die(cpu);
        native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}