/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 *
 * This does not handle HOTPLUG_CPU yet.
 */
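/*
 * For orientation, a sketch of how the functions below are wired up.
 * The actual assignment lives outside this file (in the smp_ops setup
 * in enlighten.c), and the field names here assume the smp_ops layout
 * of this kernel:
 *
 *	static const struct smp_ops xen_smp_ops __initdata = {
 *		.smp_prepare_boot_cpu	= xen_smp_prepare_boot_cpu,
 *		.smp_prepare_cpus	= xen_smp_prepare_cpus,
 *		.cpu_up			= xen_cpu_up,
 *		.smp_cpus_done		= xen_smp_cpus_done,
 *		.smp_send_stop		= xen_smp_send_stop,
 *		.smp_send_reschedule	= xen_smp_send_reschedule,
 *		.smp_call_function_mask	= xen_smp_call_function_mask,
 *	};
 */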
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"

static cpumask_t xen_cpu_initialized_map;
static DEFINE_PER_CPU(int, resched_irq) = -1;
static DEFINE_PER_CPU(int, callfunc_irq) = -1;
static DEFINE_PER_CPU(int, debug_irq) = -1;

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);

static struct call_data_struct *call_data;

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

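/*
 * Entry point for a freshly started secondary CPU: finish per-cpu
 * initialization, mark ourselves online, enable interrupts and drop
 * into the idle loop.  cpu_initialize_context() below points the new
 * VCPU's eip at this function.
 */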
static __cpuinit void cpu_bringup_and_idle(void)
{
        int cpu = smp_processor_id();

        cpu_init();
        xen_enable_sysenter();

        preempt_disable();
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        xen_setup_cpu_clockevents();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
        cpu_idle();
}

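/*
 * Bind this CPU's IPI event channels (reschedule and call-function)
 * and the VIRQ_DEBUG virtual irq to their handlers, recording the
 * resulting irqs in the per-cpu variables above.  On failure, unbind
 * whatever had already been set up (the per-cpu values start out as
 * -1, so only successfully bound irqs are torn down) and return the
 * error.
 */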
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        const char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(resched_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(callfunc_irq, cpu) = rc;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(debug_irq, cpu) = rc;

        return 0;

 fail:
        if (per_cpu(resched_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        if (per_cpu(callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
        if (per_cpu(debug_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
        return rc;
}

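/*
 * Discover how many VCPUs the hypervisor has given us: mark as
 * possible every CPU for which the VCPUOP_is_up hypercall succeeds
 * (returns >= 0), whether or not that VCPU is currently running.
 */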
void __init xen_fill_possible_map(void)
{
        int i, rc;

        for (i = 0; i < NR_CPUS; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0)
                        cpu_set(i, cpu_possible_map);
        }
}

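/*
 * Boot-CPU half of SMP preparation: do the native setup, let the old
 * GDT memory be recycled (see the comment below), clear stale
 * topology state and set up vcpu_info placement.
 */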
void __init xen_smp_prepare_boot_cpu(void)
{
        int cpu;

        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
        make_lowmem_page_readwrite(&per_cpu__gdt_page);

        for_each_possible_cpu(cpu) {
                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                /*
                 * cpu_core_map lives in a per cpu area that is cleared
                 * when the per cpu array is allocated.
                 *
                 * cpus_clear(per_cpu(cpu_core_map, cpu));
                 */
        }

        xen_setup_vcpu_info_placement();
}

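/*
 * Prepare for bringing up secondary CPUs: initialize CPU 0's SMP
 * state and event-channel interrupts, trim cpu_possible_map down to
 * max_cpus, and fork an idle task for every other possible CPU.
 */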
void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;

        for_each_possible_cpu(cpu) {
                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                /*
                 * cpu_core_map will be zeroed when the per
                 * cpu area is allocated.
                 *
                 * cpus_clear(per_cpu(cpu_core_map, cpu));
                 */
        }

        smp_store_cpu_info(0);
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        xen_cpu_initialized_map = cpumask_of_cpu(0);

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
                        continue;
                cpu_clear(cpu, cpu_possible_map);
        }

        for_each_possible_cpu(cpu) {
                struct task_struct *idle;

                if (cpu == 0)
                        continue;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

                cpu_set(cpu, cpu_present_map);
        }

        //init_xenbus_allowed_cpumask();
}

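/*
 * Build the initial context for a new VCPU -- segment registers,
 * entry point (cpu_bringup_and_idle), GDT frame, kernel stack,
 * event/failsafe callbacks and cr3 -- and hand it to the hypervisor
 * with VCPUOP_initialise.  xen_cpu_initialized_map makes this a no-op
 * the second time it is called for the same CPU.
 */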
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct gdt_page *gdt = &per_cpu(gdt_page, cpu);

        if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ds = __USER_DS;
        ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = 0;
        ctxt->user_regs.ss = __KERNEL_DS;
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        xen_copy_trap_info(ctxt->trap_ctxt);

        ctxt->ldt_ents = 0;

        BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
        make_lowmem_page_readonly(gdt->gdt);

        ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
        ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);

        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = idle->thread.sp0;

        ctxt->event_callback_cs = __KERNEL_CS;
        ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
        ctxt->failsafe_callback_cs = __KERNEL_CS;
        ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

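/*
 * Bring a secondary CPU up: set up its per-cpu data, timer and
 * interrupts, load its initial context, mark it online and finally
 * ask the hypervisor to start it running with VCPUOP_up.
 */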
int __cpuinit xen_cpu_up(unsigned int cpu)
{
        struct task_struct *idle = idle_task(cpu);
        int rc;

#if 0
        rc = cpu_up_check(cpu);
        if (rc)
                return rc;
#endif

        init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
        irq_ctx_init(cpu);
        xen_setup_timer(cpu);

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                alternatives_smp_switch(1);

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        smp_store_cpu_info(cpu);
        set_cpu_sibling_map(cpu);
        /* This must be done before setting cpu_online_map */
        wmb();

        cpu_set(cpu, cpu_online_map);

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        return 0;
}

void xen_smp_cpus_done(unsigned int max_cpus)
{
}

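/*
 * Runs on each CPU via smp_call_function() below: stop pinning
 * anything down (in particular the pagetable we might be running on),
 * then take this VCPU down for good.
 */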
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

void xen_smp_send_stop(void)
{
        smp_call_function(stop_self, NULL, 0, 0);
}

void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

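/*
 * Deliver an IPI by signalling the per-cpu event channel bound to
 * @vector on every online CPU in @mask.
 */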
static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
{
        unsigned cpu;

        cpus_and(mask, mask, cpu_online_map);

        for_each_cpu_mask(cpu, mask)
                xen_send_IPI_one(cpu, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func)(info);
        __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();

        if (wait) {
                mb();           /* commit everything before setting finished */
                atomic_inc(&call_data->finished);
        }

        return IRQ_HANDLED;
}

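/*
 * Xen version of smp_call_function_mask(): run @func on every online
 * CPU in @mask.  The started/finished counters in call_data_struct
 * form the handshake with xen_call_function_interrupt() above; if any
 * target VCPU has had its physical CPU stolen by the scheduler, yield
 * so it gets a chance to run and answer the IPI.
 */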
int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                               void *info, int wait)
{
        struct call_data_struct data;
        int cpus, cpu;
        bool yield;

        /* Holding any lock stops cpus from going down. */
        spin_lock(&call_lock);

        cpu_clear(smp_processor_id(), mask);

        cpus = cpus_weight(mask);
        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb();                   /* write everything before IPI */

        /* Send a message to other CPUs and wait for them to respond */
        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        yield = false;
        for_each_cpu_mask(cpu, mask)
                if (xen_vcpu_stolen(cpu))
                        yield = true;

        if (yield)
                HYPERVISOR_sched_op(SCHEDOP_yield, 0);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus ||
               (wait && atomic_read(&data.finished) != cpus))
                cpu_relax();

        spin_unlock(&call_lock);

        return 0;
}