/* drivers/hv/vmbus_drv.c */

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include "hyperv_vmbus.h"

static struct acpi_device *hv_acpi_dev;

static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;

static int hyperv_panic_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

struct resource hyperv_mmio = {
	.name  = "hyperv mmio",
	.flags = IORESOURCE_MEM,
};
EXPORT_SYMBOL_GPL(hyperv_mmio);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;
	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}

static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

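/*
 * Example: the helpers above split a channel's monitor id into a
 * (trigger group, bit offset) pair, 32 monitor ids per group. A channel
 * with monitorid 75 is therefore tracked in group 75 / 32 = 2 at offset
 * 75 % 32 = 11 of the monitor page.
 */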

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);

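/*
 * For example, once a child device is registered (vmbus_device_register()
 * below names devices "vmbus_%d"), these attributes appear as read-only
 * files such as /sys/bus/vmbus/devices/vmbus_1/id and
 * /sys/bus/vmbus/devices/vmbus_1/class_id; the "vmbus_1" name here is
 * purely illustrative.
 */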

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. Udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}

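/*
 * The variable added above takes the form MODALIAS=vmbus:<32 hex chars>,
 * one "%02x" pair per byte of the device's class GUID, which userspace
 * (modprobe) matches against the vmbus: aliases generated from a child
 * driver's MODULE_DEVICE_TABLE(vmbus, ...) entries.
 */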

static const uuid_le null_guid;

static inline bool is_null_guid(const __u8 *guid)
{
	if (memcmp(guid, &null_guid, sizeof(uuid_le)))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const __u8 *guid)
{
	for (; !is_null_guid(id->guid); id++)
		if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
			return id;

	return NULL;
}

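/*
 * A child driver's id_table is expected to be a null-GUID-terminated
 * array; a minimal sketch (GUID bytes purely illustrative):
 *
 *	static const struct hv_vmbus_device_id my_ids[] = {
 *		{ .guid = { 0x63, 0x51, 0x61, 0xf8, ... } },
 *		{ },
 *	};
 *
 * hv_vmbus_get_id() walks such a table until is_null_guid() sees the
 * all-zero terminator.
 */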

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);
	u32 relid = dev->channel->offermsg.child_relid;

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
		else {
			hv_process_channel_removal(dev->channel, relid);
			pr_err("remove not set for driver %s\n",
			       dev_name(child_device));
		}
	} else {
		/*
		 * We don't have a driver for this device; deal with the
		 * rescind message by removing the channel.
		 */
		hv_process_channel_removal(dev->channel, relid);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);

	return;
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);

	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_groups,
};

struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	msg->header.message_type = HVMSG_NONE;

	/*
	 * Make sure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}

static void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct onmessage_work_context *ctx;

	while (1) {
		if (msg->header.message_type == HVMSG_NONE) {
			/* no msg */
			break;
		} else {
			ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
			if (ctx == NULL)
				continue;
			INIT_WORK(&ctx->work, vmbus_onmessage_work);
			memcpy(&ctx->msg, msg, sizeof(*msg));
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}

		msg->header.message_type = HVMSG_NONE;

		/*
		 * Make sure the write to MessageType (ie set to
		 * HVMSG_NONE) happens before we read the
		 * MessagePending and EOMing. Otherwise, the EOMing
		 * will not deliver any more messages since there is
		 * no empty slot
		 */
		mb();

		if (msg->header.message_flags.msg_pending) {
			/*
			 * This will cause message queue rescan to
			 * possibly deliver another msg from the
			 * hypervisor
			 */
			wrmsrl(HV_X64_MSR_EOM, 0);
		}
	}
}

static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(&msg_dpc);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int hyperv_cpu_disable(void)
{
	return -ENOSYS;
}

static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
{
	static void *previous_cpu_disable;

	/*
	 * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
	 * ...) is not supported at this moment as channel interrupts are
	 * distributed across all of them.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7))
		return;

	if (vmbus_loaded) {
		previous_cpu_disable = smp_ops.cpu_disable;
		smp_ops.cpu_disable = hyperv_cpu_disable;
		pr_notice("CPU offlining is not supported by hypervisor\n");
	} else if (previous_cpu_disable)
		smp_ops.cpu_disable = previous_cpu_disable;
}
#else
static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
{
}
#endif

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- get the irq resource
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(int irq)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_alloc;

	hv_cpu_hotplug_quirk(true);

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup();

	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

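/*
 * Typical usage from a child driver (sketch, names illustrative): fill in
 * a struct hv_driver with .name, .id_table, .probe and .remove, then call
 * vmbus_driver_register(&my_hv_drv) from module init; the
 * vmbus_driver_register() macro in hyperv.h supplies THIS_MODULE and
 * KBUILD_MODNAME to __vmbus_driver_register(). Pair it with
 * vmbus_driver_unregister(&my_hv_drv) on module exit.
 */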

/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));

	return child_device_obj;
}

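/*
 * vmbus_device_create() only allocates and fills in the hv_device; the
 * channel offer handling code (vmbus_process_offer() in channel_mgmt.c)
 * is the expected caller and follows up with vmbus_device_register()
 * below once the channel is ready.
 */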

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "vmbus_%d",
		     child_device_obj->channel->id);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */

static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	switch (res->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		irq = res->data.irq.interrupts[0];
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		hyperv_mmio.start = res->data.address64.address.minimum;
		hyperv_mmio.end = res->data.address64.address.maximum;
		break;
	}

	return AE_OK;
}

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
	 * has the mmio ranges. Get that.
	 */
	if (device->parent) {
		result = acpi_walk_resources(device->parent->handle,
					METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			goto acpi_walk_err;
		if (hyperv_mmio.start && hyperv_mmio.end)
			request_resource(&iomem_resource, &hyperv_mmio);
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
	},
};

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get irq resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (irq <= 0) {
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_bus_init(irq);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	hv_remove_vmbus_irq();
	vmbus_free_channels();
	bus_unregister(&hv_bus);
	hv_cleanup();
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_cpu_hotplug_quirk(false);
	vmbus_disconnect();
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);