/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context = {
	.synic_initialized	= false,
	.hypercall_page		= NULL,
	.signal_event_param	= NULL,
	.signal_event_buffer	= NULL,
};

/*
 * query_hypervisor_presence
 * - Query CPUID for the presence of the Hyper-V hypervisor.
 */
static int query_hypervisor_presence(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int op;

	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VERSION_FEATURES;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx & HV_PRESENT_BIT;
}

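/*
 * For orientation, a sketch of how this check gates everything else;
 * hv_init() below does exactly this before touching any Hyper-V MSRs:
 *
 *	if (!query_hypervisor_presence())
 *		goto cleanup;
 *
 * HVCPUID_VERSION_FEATURES and HV_PRESENT_BIT come from the Hyper-V
 * headers; on hardware without Hyper-V the bit reads as zero.
 */
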
/*
 * query_hypervisor_info - Get version info of the Hyper-V hypervisor.
 */
static int query_hypervisor_info(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int max_leaf;
	unsigned int op;

	/*
	 * It's assumed that this is called after confirming that Viridian
	 * is present. Query id and revision.
	 */
	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VENDOR_MAXFUNCTION;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	max_leaf = eax;

	if (max_leaf >= HVCPUID_VERSION) {
		eax = 0;
		ebx = 0;
		ecx = 0;
		edx = 0;
		op = HVCPUID_VERSION;
		cpuid(op, &eax, &ebx, &ecx, &edx);
		pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
			eax,
			ebx >> 16,
			ebx & 0xFFFF,
			ecx,
			edx >> 24,
			edx & 0xFFFFFF);
	}
	return max_leaf;
}

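/*
 * For reference, the field decoding above follows the Hyper-V CPUID
 * convention: EAX carries the build number, EBX the major and minor
 * version in its high and low 16 bits, ECX the service pack, and EDX
 * the service branch (top 8 bits) and service number (low 24 bits),
 * which is what the shifts and masks in the pr_info() call unpack.
 */
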
/*
 * do_hypercall - Invoke the specified hypercall
 */
static u64 do_hypercall(u64 control, void *input, void *output)
{
#ifdef CONFIG_X86_64
	u64 hv_status = 0;
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;

	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	return hv_status;

#else

	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;
	void *hypercall_page = hv_context.hypercall_page;

	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}

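/*
 * Calling convention, per the Hyper-V hypercall interface: on x86_64 the
 * control code is passed in RCX, the guest-physical address of the input
 * buffer in RDX and of the output buffer in R8, and the status comes
 * back in RAX. On 32-bit the same 64-bit values are split across EDX:EAX
 * (control and status), EBX:ECX (input address) and EDI:ESI (output
 * address), which is what the register constraints above encode.
 *
 * A typical call, as made by hv_post_message() below; only the low
 * 16 bits of the result carry the status code (0 means success):
 *
 *	status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
 *		 & 0xFFFF;
 */
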
/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */
int hv_init(void)
{
	int max_leaf;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	void *virtaddr = NULL;

	memset(hv_context.synic_event_page, 0, sizeof(void *) * MAX_NUM_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * MAX_NUM_CPUS);

	if (!query_hypervisor_presence())
		goto cleanup;

	max_leaf = query_hypervisor_info();

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

	if (hv_context.guestid != 0)
		goto cleanup;

	/* Write our OS info */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
	hv_context.guestid = HV_LINUX_GUEST_ID;

	/* See if the hypercall page is already set */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);

	if (!virtaddr)
		goto cleanup;

	hypercall_msr.enable = 1;

	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Confirm that the hypercall page did get set up. */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	if (!hypercall_msr.enable)
		goto cleanup;

	hv_context.hypercall_page = virtaddr;

	/* Set up the global signal event param for the signal event hypercall */
	hv_context.signal_event_buffer =
		kmalloc(sizeof(struct hv_input_signal_event_buffer),
			GFP_KERNEL);
	if (!hv_context.signal_event_buffer)
		goto cleanup;

	hv_context.signal_event_param =
		(struct hv_input_signal_event *)
			(ALIGN((unsigned long)
				hv_context.signal_event_buffer,
				HV_HYPERCALL_PARAM_ALIGN));
	hv_context.signal_event_param->connectionid.asu32 = 0;
	hv_context.signal_event_param->connectionid.u.id =
		VMBUS_EVENT_CONNECTION_ID;
	hv_context.signal_event_param->flag_number = 0;
	hv_context.signal_event_param->rsvdz = 0;

	return 0;

cleanup:
	if (virtaddr) {
		if (hypercall_msr.enable) {
			hypercall_msr.as_uint64 = 0;
			wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		}

		vfree(virtaddr);
	}

	return -ENOTSUPP;
}

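/*
 * A sketch of the expected bring-up order, roughly what the vmbus driver
 * does at module init (irq_vector here is illustrative: the IDT vector
 * that the vmbus interrupt handler was registered on):
 *
 *	ret = hv_init();
 *	if (ret)
 *		return ret;
 *	...
 *	on_each_cpu(hv_synic_init, (void *)&irq_vector, 1);
 */
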
/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is called normally during driver unloading or exiting.
 */
void hv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	kfree(hv_context.signal_event_buffer);
	hv_context.signal_event_buffer = NULL;
	hv_context.signal_event_param = NULL;

	if (hv_context.hypercall_page) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		vfree(hv_context.hypercall_page);
		hv_context.hypercall_page = NULL;
	}
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
u16 hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct aligned_input {
		u64 alignment8;
		struct hv_input_post_message msg;
	};

	struct hv_input_post_message *aligned_msg;
	u16 status;
	unsigned long addr;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
	if (!addr)
		return -ENOMEM;

	aligned_msg = (struct hv_input_post_message *)
			(ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));

	aligned_msg->connectionid = connection_id;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
		 & 0xFFFF;

	kfree((void *)addr);

	return status;
}

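/*
 * Note on the aligned_input wrapper above: it over-allocates by one u64
 * so that the embedded hv_input_post_message can be rounded up to
 * HV_HYPERCALL_PARAM_ALIGN, which the hypervisor requires of hypercall
 * input buffers, without needing an alignment-aware allocator.
 */
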
/*
 * hv_signal_event -
 * Signal an event on the specified connection using the hypervisor event IPC.
 *
 * This involves a hypercall.
 */
u16 hv_signal_event(void)
{
	u16 status;

	status = do_hypercall(HVCALL_SIGNAL_EVENT,
			      hv_context.signal_event_param,
			      NULL) & 0xFFFF;
	return status;
}

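/*
 * Note that the hypercall input (hv_context.signal_event_param) was
 * built once in hv_init(), so signalling an event needs no allocation
 * or copying on this hot path, just the hypercall itself.
 */
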
/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., the x2v shim), we
 * need to retrieve the initialized message and event pages. Otherwise, we
 * create and initialize the message and event pages.
 */
void hv_synic_init(void *irqarg)
{
	u64 version;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	u32 irq_vector = *((u32 *)(irqarg));
	int cpu = smp_processor_id();

	if (!hv_context.hypercall_page)
		return;

	/* Check the version */
	rdmsrl(HV_X64_MSR_SVERSION, version);

	hv_context.synic_message_page[cpu] =
		(void *)get_zeroed_page(GFP_ATOMIC);

	if (hv_context.synic_message_page[cpu] == NULL) {
		pr_err("Unable to allocate SYNIC message page\n");
		goto cleanup;
	}

	hv_context.synic_event_page[cpu] =
		(void *)get_zeroed_page(GFP_ATOMIC);

	if (hv_context.synic_event_page[cpu] == NULL) {
		pr_err("Unable to allocate SYNIC event page\n");
		goto cleanup;
	}

	/* Set up the SynIC message page */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	/* Set up the SynIC event page */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Set up the shared SINT. */
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
	shared_sint.masked = false;
	shared_sint.auto_eoi = false;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global SynIC bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 1;

	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	hv_context.synic_initialized = true;
	return;

cleanup:
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);

	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	return;
}

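/*
 * hv_synic_init() is expected to run on each CPU with interrupts
 * disabled (the vmbus driver invokes it through on_each_cpu()), which
 * is why smp_processor_id() is stable here and why the page
 * allocations above use GFP_ATOMIC.
 */
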
/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	int cpu = smp_processor_id();

	if (!hv_context.synic_initialized)
		return;

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to correctly clean up in the SMP case!!! */
	/* Disable the interrupt */
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	free_page((unsigned long)hv_context.synic_message_page[cpu]);
	free_page((unsigned long)hv_context.synic_event_page[cpu]);
}
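
/*
 * Teardown mirrors bring-up; the vmbus driver does roughly:
 *
 *	on_each_cpu(hv_synic_cleanup, NULL, 1);
 *	hv_cleanup();
 *
 * so the per-cpu SynIC state is torn down before the hypercall page is
 * released.
 */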