/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 *
 * This does not handle HOTPLUG_CPU yet.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"

static cpumask_t cpu_initialized_map;
static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);

/*
 * Structure and data for smp_call_function().  This is designed to
 * minimise static memory requirements.  It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};
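
/*
 * Handshake protocol: the initiator publishes its request through
 * call_data and sends the call-function IPI; each target increments
 * 'started' once it has copied func/info, and 'finished' after func
 * has run (only needed when the initiator waits).
 */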

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);

static struct call_data_struct *call_data;

/*
 * Reschedule callback.  Nothing to do; all the work is done
 * automatically when we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

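/*
 * Entry point for a freshly started secondary VCPU; this is where
 * cpu_initialize_context() points the new vcpu's eip.  Finish per-cpu
 * setup, mark the CPU online and drop into the idle loop.
 */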
static __cpuinit void cpu_bringup_and_idle(void)
{
	int cpu = smp_processor_id();

	cpu_init();

	preempt_disable();
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	xen_setup_cpu_clockevents();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
	cpu_idle();
}

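/*
 * Bind the reschedule and call-function IPI event channels for @cpu,
 * recording the resulting irqs so a partial failure can be unwound.
 */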
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name;

	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	return rc;
}

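/*
 * Ask the hypervisor which VCPUs exist: VCPUOP_is_up returns >= 0 for
 * any vcpu the domain owns, whether or not it is currently running.
 */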
void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < NR_CPUS; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0)
			cpu_set(i, cpu_possible_map);
	}
}

void __init xen_smp_prepare_boot_cpu(void)
{
	int cpu;

	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(&per_cpu__gdt_page);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	xen_setup_vcpu_info_placement();
}

void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	smp_store_cpu_info(0);
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}

	/* init_xenbus_allowed_cpumask(); */
}

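/*
 * Build the initial register and descriptor-table state for a new vcpu
 * and hand it to Xen with VCPUOP_initialise.  The GDT page is made
 * read-only first, since Xen requires guest descriptor tables to live
 * in read-only memory.
 */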
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);

	if (cpu_test_and_set(cpu, cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = 0;
	ctxt->user_regs.ss = __KERNEL_DS;
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt->gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
	ctxt->gdt_ents = ARRAY_SIZE(gdt->gdt);

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.esp0;

	ctxt->event_callback_cs = __KERNEL_CS;
	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_cs = __KERNEL_CS;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

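/*
 * Boot a secondary CPU: set up its per-cpu data, timer and IPI event
 * channels, load its initial context into the hypervisor, then kick
 * the vcpu into life with VCPUOP_up.
 */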
int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
	xen_setup_timer(cpu);

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	smp_store_cpu_info(cpu);
	set_cpu_sibling_map(cpu);
	/* This must be done before setting cpu_online_map */
	wmb();

	cpu_set(cpu, cpu_online_map);

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	return 0;
}

void xen_smp_cpus_done(unsigned int max_cpus)
{
}

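/*
 * Runs on each CPU being shut down.  VCPUOP_down takes the calling
 * vcpu offline and does not return, so reaching BUG() means the
 * hypercall failed.
 */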
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

void xen_smp_send_stop(void)
{
	smp_call_function(stop_self, NULL, 0, 0);
}

void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
{
	unsigned cpu;

	cpus_and(mask, mask, cpu_online_map);

	for_each_cpu_mask(cpu, mask)
		xen_send_IPI_one(cpu, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();		/* commit everything before setting finished */
		atomic_inc(&call_data->finished);
	}

	return IRQ_HANDLED;
}

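/*
 * Run @func on every CPU in @mask except the caller; if @wait is set,
 * spin until every target has finished.  A hypothetical caller (the
 * flush_fn below is illustrative, not a function in this file) might
 * look like:
 *
 *	xen_smp_call_function_mask(cpu_online_map, flush_fn, NULL, 1);
 */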
int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
			       void *info, int wait)
{
	struct call_data_struct data;
	int cpus;

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);

	cpus = cpus_weight(mask);
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();			/* write everything before IPI */

	/* Send a message to other CPUs and wait for them to respond */
	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run.
	   XXX too severe?  Maybe we should check the other CPU's states? */
	HYPERVISOR_sched_op(SCHEDOP_yield, 0);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus ||
	       (wait && atomic_read(&data.finished) != cpus))
		cpu_relax();

	spin_unlock(&call_lock);

	return 0;
}