/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740

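/*
 * Number of resources queried in the host capability check
 * (currently just VMCI_GET_CONTEXT_ID; see vmci_check_host_caps()).
 */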
#define VMCI_UTIL_NUM_RESOURCES 1

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

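/* Subscription ID for context-ID-update events, and the cached VM context ID. */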
static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

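/*
 * Per-device state for a probed VMCI PCI device. Incoming datagrams are
 * pulled into data_buffer by the datagram tasklet, and doorbell state is
 * tracked in the DMA-coherent notification_bitmap page.
 */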
struct vmci_guest_device {
        struct device *dev;     /* PCI device we are attached to */
        void __iomem *iobase;

        unsigned int irq;
        unsigned int intr_type;
        bool exclusive_vectors;
        struct msix_entry msix_entries[VMCI_MAX_INTRS];

        struct tasklet_struct datagram_tasklet;
        struct tasklet_struct bm_tasklet;

        void *data_buffer;
        void *notification_bitmap;
        dma_addr_t notification_base;
};

/* vmci_dev singleton device and supporting data */
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

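/* True while at least one VMCI guest device has been probed. */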
bool vmci_guest_code_active(void)
{
        return atomic_read(&vmci_num_guest_devices) != 0;
}

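/*
 * Return this VM's context ID, querying the hypervisor with a
 * VMCI_GET_CONTEXT_ID datagram on first use. The cached value is
 * refreshed by vmci_guest_cid_update() on update/resume events.
 */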
u32 vmci_get_vm_context_id(void)
{
        if (vm_context_id == VMCI_INVALID_ID) {
                struct vmci_datagram get_cid_msg;
                get_cid_msg.dst =
                        vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                         VMCI_GET_CONTEXT_ID);
                get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
                get_cid_msg.payload_size = 0;
                vm_context_id = vmci_send_datagram(&get_cid_msg);
        }
        return vm_context_id;
}

/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention, since shared code calls this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
        unsigned long flags;
        int result;

        /* Check args. */
        if (dg == NULL)
                return VMCI_ERROR_INVALID_ARGS;

        /*
         * We need to acquire the device spinlock because the datagram
         * data may be spread over multiple pages and the monitor may
         * interleave device user rpc calls from multiple VCPUs.
         * Acquiring the spinlock precludes that possibility.
         * Interrupts are disabled so that an incoming datagram cannot
         * interrupt the "rep out" and end up re-entering this function.
         */
        spin_lock_irqsave(&vmci_dev_spinlock, flags);

        if (vmci_dev_g) {
                iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
                             dg, VMCI_DG_SIZE(dg));
                result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
        } else {
                result = VMCI_ERROR_UNAVAILABLE;
        }

        spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

        return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);

/*
 * Event callback invoked with the new context ID when this VM's
 * context ID is updated or the VM is resumed.
 */
static void vmci_guest_cid_update(u32 sub_id,
                                  const struct vmci_event_data *event_data,
                                  void *client_data)
{
        const struct vmci_event_payld_ctx *ev_payload =
                                vmci_event_data_const_payload(event_data);

        if (sub_id != ctx_update_sub_id) {
                pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
                return;
        }

        if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
                pr_devel("Invalid event data\n");
                return;
        }

        pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
                 vm_context_id, ev_payload->context_id, event_data->event);

        vm_context_id = ev_payload->context_id;
}

/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead. Returns true
 * if the required hypercalls (or fallback hypercalls) are supported by
 * the host, false otherwise.
 */
static bool vmci_check_host_caps(struct pci_dev *pdev)
{
        bool result;
        struct vmci_resource_query_msg *msg;
        u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
                        VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
        struct vmci_datagram *check_msg;

        check_msg = kmalloc(msg_size, GFP_KERNEL);
        if (!check_msg) {
                dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
                return false;
        }

        check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_RESOURCES_QUERY);
        check_msg->src = VMCI_ANON_SRC_HANDLE;
        check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
        msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

        msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
        msg->resources[0] = VMCI_GET_CONTEXT_ID;

        /*
         * Send the query; a result of 0x01 means that the single
         * hypercall resource we asked about is supported.
         */
        result = vmci_send_datagram(check_msg) == 0x01;
        kfree(check_msg);

        dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
                __func__, result ? "PASSED" : "FAILED");

        /* We need the vector. There are no fallbacks. */
        return result;
}

/*
 * Reads datagrams from the data in port and dispatches them. We
 * always start reading datagrams into only the first page of the
 * datagram buffer. If the datagrams don't fit into one page, we
 * use the maximum datagram buffer size for the remainder of the
 * invocation. This is a simple heuristic for not penalizing
 * small datagrams.
 *
 * This function assumes that it has exclusive access to the data
 * in port for the duration of the call.
 */
static void vmci_dispatch_dgs(unsigned long data)
{
        struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
        u8 *dg_in_buffer = vmci_dev->data_buffer;
        struct vmci_datagram *dg;
        size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
        size_t current_dg_in_buffer_size = PAGE_SIZE;
        size_t remaining_bytes;

        BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

        ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
                    vmci_dev->data_buffer, current_dg_in_buffer_size);
        dg = (struct vmci_datagram *)dg_in_buffer;
        remaining_bytes = current_dg_in_buffer_size;

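        /*
         * Keep going while the current position holds a valid datagram, or
         * while more than a page remains: an invalid destination may only
         * mean that the next datagram starts on the following page boundary.
         */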
        while (dg->dst.resource != VMCI_INVALID_ID ||
               remaining_bytes > PAGE_SIZE) {
                unsigned dg_in_size;

                /*
                 * When the input buffer spans multiple pages, a datagram can
                 * start on any page boundary in the buffer.
                 */
                if (dg->dst.resource == VMCI_INVALID_ID) {
                        dg = (struct vmci_datagram *)roundup(
                                (uintptr_t)dg + 1, PAGE_SIZE);
                        remaining_bytes =
                                (size_t)(dg_in_buffer +
                                         current_dg_in_buffer_size -
                                         (u8 *)dg);
                        continue;
                }

                dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

                if (dg_in_size <= dg_in_buffer_size) {
                        int result;

                        /*
                         * If the remaining bytes in the datagram
                         * buffer don't contain the complete
                         * datagram, we first make sure we have enough
                         * room for it and then we read the remainder
                         * of the datagram and possibly any following
                         * datagrams.
                         */
                        if (dg_in_size > remaining_bytes) {
                                if (remaining_bytes !=
                                    current_dg_in_buffer_size) {

                                        /*
                                         * We move the partial
                                         * datagram to the front and
                                         * read the remainder of the
                                         * datagram and possibly
                                         * following calls into the
                                         * following bytes.
                                         */
                                        memmove(dg_in_buffer, dg_in_buffer +
                                                current_dg_in_buffer_size -
                                                remaining_bytes,
                                                remaining_bytes);
                                        dg = (struct vmci_datagram *)
                                                dg_in_buffer;
                                }

                                if (current_dg_in_buffer_size !=
                                    dg_in_buffer_size)
                                        current_dg_in_buffer_size =
                                                dg_in_buffer_size;

                                ioread8_rep(vmci_dev->iobase +
                                                VMCI_DATA_IN_ADDR,
                                            vmci_dev->data_buffer +
                                                remaining_bytes,
                                            current_dg_in_buffer_size -
                                                remaining_bytes);
                        }

                        /*
                         * We special case event datagrams from the
                         * hypervisor.
                         */
                        if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
                            dg->dst.resource == VMCI_EVENT_HANDLER) {
                                result = vmci_event_dispatch(dg);
                        } else {
                                result = vmci_datagram_invoke_guest_handler(dg);
                        }
                        if (result < VMCI_SUCCESS)
                                dev_dbg(vmci_dev->dev,
                                        "Datagram with resource (ID=0x%x) failed (err=%d)\n",
                                        dg->dst.resource, result);

                        /* On to the next datagram. */
                        dg = (struct vmci_datagram *)((u8 *)dg +
                                                      dg_in_size);
                } else {
                        size_t bytes_to_skip;

                        /*
                         * The datagram doesn't fit in a buffer of
                         * maximal size, so we drop it.
                         */
                        dev_dbg(vmci_dev->dev,
                                "Failed to receive datagram (size=%u bytes)\n",
                                dg_in_size);

                        bytes_to_skip = dg_in_size - remaining_bytes;
                        if (current_dg_in_buffer_size != dg_in_buffer_size)
                                current_dg_in_buffer_size = dg_in_buffer_size;

                        for (;;) {
                                ioread8_rep(vmci_dev->iobase +
                                                VMCI_DATA_IN_ADDR,
                                            vmci_dev->data_buffer,
                                            current_dg_in_buffer_size);
                                if (bytes_to_skip <= current_dg_in_buffer_size)
                                        break;

                                bytes_to_skip -= current_dg_in_buffer_size;
                        }
                        dg = (struct vmci_datagram *)(dg_in_buffer +
                                                      bytes_to_skip);
                }

                remaining_bytes =
                        (size_t)(dg_in_buffer + current_dg_in_buffer_size -
                                 (u8 *)dg);

                if (remaining_bytes < VMCI_DG_HEADERSIZE) {
                        /* Get the next batch of datagrams. */

                        ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
                                    vmci_dev->data_buffer,
                                    current_dg_in_buffer_size);
                        dg = (struct vmci_datagram *)dg_in_buffer;
                        remaining_bytes = current_dg_in_buffer_size;
                }
        }
}

/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(unsigned long data)
{
        struct vmci_guest_device *dev = (struct vmci_guest_device *)data;

        if (!dev->notification_bitmap) {
                dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
                return;
        }

        vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

/*
 * Enable MSI-X. Try exclusive vectors first, then shared vectors.
 */
static int vmci_enable_msix(struct pci_dev *pdev,
                            struct vmci_guest_device *vmci_dev)
{
        int i;
        int result;

        for (i = 0; i < VMCI_MAX_INTRS; ++i) {
                vmci_dev->msix_entries[i].entry = i;
                vmci_dev->msix_entries[i].vector = i;
        }

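        /*
         * With the legacy pci_enable_msix() API used here, 0 means all
         * requested vectors were allocated, a positive value is the number
         * of vectors that could have been allocated, and a negative value
         * is an error; on partial success we retry with a single vector.
         */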
        result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
        if (result == 0)
                vmci_dev->exclusive_vectors = true;
        else if (result > 0)
                result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);

        return result;
}

/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
        struct vmci_guest_device *dev = _dev;

        /*
         * If we are using MSI-X with exclusive vectors then we simply schedule
         * the datagram tasklet, since we know the interrupt was meant for us.
         * Otherwise we must read the ICR to determine what to do.
         */

        if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
                tasklet_schedule(&dev->datagram_tasklet);
        } else {
                unsigned int icr;

                /* Acknowledge interrupt and determine what needs doing. */
                icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
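                /*
                 * 0 means the interrupt was not ours; all-ones typically
                 * means the device is no longer present.
                 */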
                if (icr == 0 || icr == ~0)
                        return IRQ_NONE;

                if (icr & VMCI_ICR_DATAGRAM) {
                        tasklet_schedule(&dev->datagram_tasklet);
                        icr &= ~VMCI_ICR_DATAGRAM;
                }

                if (icr & VMCI_ICR_NOTIFICATION) {
                        tasklet_schedule(&dev->bm_tasklet);
                        icr &= ~VMCI_ICR_NOTIFICATION;
                }

                if (icr != 0)
                        dev_warn(dev->dev,
                                 "Ignoring unknown interrupt cause (%d)\n",
                                 icr);
        }

        return IRQ_HANDLED;
}

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap. Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
        struct vmci_guest_device *dev = _dev;

        /* For MSI-X we can just assume it was meant for us. */
        tasklet_schedule(&dev->bm_tasklet);

        return IRQ_HANDLED;
}

/*
 * Most of the initialization at module load time is done here.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        struct vmci_guest_device *vmci_dev;
        void __iomem *iobase;
        unsigned int capabilities;
        unsigned long cmd;
        int vmci_err;
        int error;

        dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

        error = pcim_enable_device(pdev);
        if (error) {
                dev_err(&pdev->dev,
                        "Failed to enable VMCI device: %d\n", error);
                return error;
        }

        error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
        if (error) {
                dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
                return error;
        }

        iobase = pcim_iomap_table(pdev)[0];

        dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
                 (unsigned long)iobase, pdev->irq);

        vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
        if (!vmci_dev) {
                dev_err(&pdev->dev,
                        "Can't allocate memory for VMCI device\n");
                return -ENOMEM;
        }

        vmci_dev->dev = &pdev->dev;
        vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
        vmci_dev->exclusive_vectors = false;
        vmci_dev->iobase = iobase;

        tasklet_init(&vmci_dev->datagram_tasklet,
                     vmci_dispatch_dgs, (unsigned long)vmci_dev);
        tasklet_init(&vmci_dev->bm_tasklet,
                     vmci_process_bitmap, (unsigned long)vmci_dev);

        vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
        if (!vmci_dev->data_buffer) {
                dev_err(&pdev->dev,
                        "Can't allocate memory for datagram buffer\n");
                return -ENOMEM;
        }

        pci_set_master(pdev);   /* To enable queue_pair functionality. */

        /*
         * Verify that the VMCI Device supports the capabilities that
         * we need. If the device is missing capabilities that we would
         * like to use, check for fallback capabilities and use those
         * instead (so we can run a new VM on old hosts). Fail the load if
         * a required capability is missing and there is no fallback.
         *
         * Right now, we need datagrams. There are no fallbacks.
         */
        capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
        if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
                dev_err(&pdev->dev, "Device does not support datagrams\n");
                error = -ENXIO;
                goto err_free_data_buffer;
        }

        /*
         * Track only the capabilities we actually enable, starting from
         * the required datagram support; without this reset, the
         * notification bit below would stay advertised even when the
         * bitmap allocation fails.
         */
        capabilities = VMCI_CAPS_DATAGRAM;

        /*
         * If the hardware supports notifications, we will use that as
         * well.
         */
        if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
                vmci_dev->notification_bitmap = dma_alloc_coherent(
                        &pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
                        GFP_KERNEL);
                if (!vmci_dev->notification_bitmap) {
                        dev_warn(&pdev->dev,
                                 "Unable to allocate notification bitmap\n");
                } else {
                        memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
                        capabilities |= VMCI_CAPS_NOTIFICATIONS;
                }
        }

        dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);

        /* Let the host know which capabilities we intend to use. */
        iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);

        /* Set up global device so that we can start sending datagrams */
        spin_lock_irq(&vmci_dev_spinlock);
        vmci_dev_g = vmci_dev;
        vmci_pdev = pdev;
        spin_unlock_irq(&vmci_dev_spinlock);

        /*
         * Register notification bitmap with device if that capability is
         * used.
         */
        if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
                unsigned long bitmap_ppn =
                        vmci_dev->notification_base >> PAGE_SHIFT;
                if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
                        dev_warn(&pdev->dev,
                                 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
                                 (u32) bitmap_ppn);
                        /* Set an error so the probe does not report success. */
                        error = -ENXIO;
                        goto err_remove_vmci_dev_g;
                }
        }

        /* Check host capabilities; fail the probe if they are missing. */
        if (!vmci_check_host_caps(pdev)) {
                error = -ENXIO;
                goto err_remove_bitmap;
        }

        /* Enable device. */

        /*
         * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
         * update the internal context id when needed.
         */
        vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
                                        vmci_guest_cid_update, NULL,
                                        &ctx_update_sub_id);
        if (vmci_err < VMCI_SUCCESS)
                dev_warn(&pdev->dev,
                         "Failed to subscribe to event (type=%d): %d\n",
                         VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

        /*
         * Enable interrupts. Try MSI-X first, then MSI, and then fall
         * back to legacy interrupts.
         */
        if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
                vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
                vmci_dev->irq = vmci_dev->msix_entries[0].vector;
        } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
                vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
                vmci_dev->irq = pdev->irq;
        } else {
                vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
                vmci_dev->irq = pdev->irq;
        }

        /*
         * Request IRQ for legacy or MSI interrupts, or for first
         * MSI-X vector.
         */
        error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
                            KBUILD_MODNAME, vmci_dev);
        if (error) {
                dev_err(&pdev->dev, "Irq %u in use: %d\n",
                        vmci_dev->irq, error);
                goto err_disable_msi;
        }

        /*
         * For MSI-X with exclusive vectors we need to request an
         * interrupt for each vector so that we get a separate
         * interrupt handler routine. This allows us to distinguish
         * between the vectors.
         */
        if (vmci_dev->exclusive_vectors) {
                error = request_irq(vmci_dev->msix_entries[1].vector,
                                    vmci_interrupt_bm, 0, KBUILD_MODNAME,
                                    vmci_dev);
                if (error) {
                        dev_err(&pdev->dev,
                                "Failed to allocate irq %u: %d\n",
                                vmci_dev->msix_entries[1].vector, error);
                        goto err_free_irq;
                }
        }

        dev_dbg(&pdev->dev, "Registered device\n");

        atomic_inc(&vmci_num_guest_devices);

        /* Enable specific interrupt bits. */
        cmd = VMCI_IMR_DATAGRAM;
        if (capabilities & VMCI_CAPS_NOTIFICATIONS)
                cmd |= VMCI_IMR_NOTIFICATION;
        iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

        /* Enable interrupts. */
        iowrite32(VMCI_CONTROL_INT_ENABLE,
                  vmci_dev->iobase + VMCI_CONTROL_ADDR);

        pci_set_drvdata(pdev, vmci_dev);
        return 0;

err_free_irq:
        free_irq(vmci_dev->irq, vmci_dev);
        tasklet_kill(&vmci_dev->datagram_tasklet);
        tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
        if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
                pci_disable_msix(pdev);
        else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
                pci_disable_msi(pdev);

        vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
        if (vmci_err < VMCI_SUCCESS)
                dev_warn(&pdev->dev,
                         "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
                         VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
        if (vmci_dev->notification_bitmap) {
                iowrite32(VMCI_CONTROL_RESET,
                          vmci_dev->iobase + VMCI_CONTROL_ADDR);
                dma_free_coherent(&pdev->dev, PAGE_SIZE,
                                  vmci_dev->notification_bitmap,
                                  vmci_dev->notification_base);
        }

err_remove_vmci_dev_g:
        spin_lock_irq(&vmci_dev_spinlock);
        vmci_pdev = NULL;
        vmci_dev_g = NULL;
        spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
        vfree(vmci_dev->data_buffer);

        /* The rest are managed resources and will be freed by PCI core */
        return error;
}

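/*
 * Tear down the device in roughly the reverse order of probe: drop the
 * global device pointer, reset the device, free the IRQs and disable
 * MSI/MSI-X, kill the tasklets, and release the notification bitmap
 * and the datagram buffer.
 */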
static void vmci_guest_remove_device(struct pci_dev *pdev)
{
        struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
        int vmci_err;

        dev_dbg(&pdev->dev, "Removing device\n");

        atomic_dec(&vmci_num_guest_devices);

        vmci_qp_guest_endpoints_exit();

        vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
        if (vmci_err < VMCI_SUCCESS)
                dev_warn(&pdev->dev,
                         "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
                         VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

        spin_lock_irq(&vmci_dev_spinlock);
        vmci_dev_g = NULL;
        vmci_pdev = NULL;
        spin_unlock_irq(&vmci_dev_spinlock);

        dev_dbg(&pdev->dev, "Resetting vmci device\n");
        iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);

        /*
         * Free IRQ and then disable MSI/MSI-X as appropriate. For
         * MSI-X, we might have multiple vectors, each with their own
         * IRQ, which we must free too.
         */
        free_irq(vmci_dev->irq, vmci_dev);
        if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
                if (vmci_dev->exclusive_vectors)
                        free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
                pci_disable_msix(pdev);
        } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
                pci_disable_msi(pdev);
        }

        tasklet_kill(&vmci_dev->datagram_tasklet);
        tasklet_kill(&vmci_dev->bm_tasklet);

        if (vmci_dev->notification_bitmap) {
                /*
                 * The device reset above cleared the bitmap state of the
                 * device, so we can safely free it here.
                 */
                dma_free_coherent(&pdev->dev, PAGE_SIZE,
                                  vmci_dev->notification_bitmap,
                                  vmci_dev->notification_base);
        }

        vfree(vmci_dev->data_buffer);

        /* The rest are managed resources and will be freed by PCI core */
}

static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
        { 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
        .name = KBUILD_MODNAME,
        .id_table = vmci_ids,
        .probe = vmci_guest_probe_device,
        .remove = vmci_guest_remove_device,
};

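/*
 * Driver registration entry points. There is no module_init() in this
 * file, so these are presumably invoked from the common VMCI driver's
 * module init/exit code.
 */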
int __init vmci_guest_init(void)
{
        return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
        pci_unregister_driver(&vmci_guest_driver);
}