/*
 * Virtio PCI driver
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");

/* Our device structure */
struct virtio_pci_device
{
	struct virtio_device vdev;
	struct pci_dev *pci_dev;

	/* the IO mapping for the PCI config space */
	void __iomem *ioaddr;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;

	/* MSI-X support */
	int msix_enabled;
	int intx_enabled;
	struct msix_entry *msix_entries;
	cpumask_var_t *msix_affinity_masks;
	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
	char (*msix_names)[256];
	/* Number of available vectors */
	unsigned msix_vectors;
	/* Vectors allocated, excluding per-vq vectors if any */
	unsigned msix_used_vectors;

	/* Whether we have vector per vq */
	bool per_vq_vectors;
};

/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
 * virtqueues. Thus, we need at least 2 vectors for MSI. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};

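/*
 * Worked example of the vector layout (illustrative only, not used by the
 * code below): with three virtqueues and per-VQ vectors, vector 0 handles
 * configuration changes and vectors 1..3 handle vq0..vq2.  In the
 * shared-vector fallback, vector 0 still handles configuration changes and
 * vector 1 (VP_MSIX_VQ_VECTOR) is shared by every virtqueue.
 */
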
struct virtio_pci_vq_info
{
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the number of entries in the queue */
	int num;

	/* the virtual address of the ring queue */
	void *queue;

	/* the list node for the virtqueues list */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned msix_vector;
};

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(0x1af4, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_pci_device, vdev);
}

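/*
 * Illustrative sketch of what container_of() expands to here (not part of
 * the driver): the offset of the embedded 'vdev' member is subtracted from
 * the member pointer to recover the containing structure, roughly
 *
 *	(struct virtio_pci_device *)((char *)vdev -
 *		offsetof(struct virtio_pci_device, vdev));
 */
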
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* When someone needs more than 32 feature bits, we'll need to
	 * steal a bit to indicate that the rest are somewhere else. */
	return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}

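/*
 * Illustrative only (not part of this file): a caller of ->get_features()
 * tests individual bits of the returned mask, e.g.
 *
 *	u64 features = vp_get_features(vdev);
 *	bool indirect = features & (1ULL << VIRTIO_RING_F_INDIRECT_DESC);
 *
 * On this legacy transport only bits 0-31 can ever be set.
 */
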
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/* We only support 32 feature bits. */
	iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

	return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	void __iomem *ioaddr = vp_dev->ioaddr +
				VIRTIO_PCI_CONFIG(vp_dev) + offset;
	u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		ptr[i] = ioread8(ioaddr + i);
}

/* the config->set() implementation. it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	void __iomem *ioaddr = vp_dev->ioaddr +
				VIRTIO_PCI_CONFIG(vp_dev) + offset;
	const u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		iowrite8(ptr[i], ioaddr + i);
}

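/*
 * Sketch of how these byte-wise accessors are reached in practice (a
 * hypothetical virtio-net example, assuming the usual config layout; not
 * part of this file):
 *
 *	u8 mac[6];
 *	virtio_cread_bytes(vdev, offsetof(struct virtio_net_config, mac),
 *			   mac, sizeof(mac));
 *
 * which bottoms out in vp_get() one byte at a time.
 */
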
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

/* wait for pending irq handlers */
static void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(vp_dev->msix_entries[i].vector);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

/* the notify function used when creating a virt queue */
static bool vp_notify(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

static void vp_free_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(vp_dev->msix_entries[i].vector, vp_dev);

	for (i = 0; i < vp_dev->msix_vectors; i++)
		if (vp_dev->msix_affinity_masks[i])
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
		/* Flush the write out to device */
		ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);

		pci_disable_msix(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_entries);
	vp_dev->msix_entries = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
}

static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
				       GFP_KERNEL);
	if (!vp_dev->msix_entries)
		goto error;
	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
				     GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
					GFP_KERNEL))
			goto error;

	for (i = 0; i < nvectors; ++i)
		vp_dev->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(vp_dev->pci_dev,
				    vp_dev->msix_entries, nvectors);
	if (err)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(vp_dev->msix_entries[v].vector,
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	/* Verify we had enough resources to assign the vector */
	v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(vp_dev->msix_entries[v].vector,
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	vp_free_vectors(vdev);
	return err;
}

static int vp_request_intx(struct virtio_device *vdev)
{
	int err;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
			  IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
	if (!err)
		vp_dev->intx_enabled = 1;
	return err;
}

static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	u16 num;
	int err;

	/* Select the queue we're interested in */
	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	/* Check if queue is either not available or already active. */
	num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
		return ERR_PTR(-ENOENT);

	/* allocate and fill out our structure the represents an active
	 * queue */
	info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->num = num;
	info->msix_vector = msix_vec;

	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
	if (info->queue == NULL) {
		err = -ENOMEM;
		goto out_info;
	}

	/* activate the queue */
	iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* create the vring */
	vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
				 true, info->queue, vp_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto out_activate_queue;
	}

	vq->priv = info;
	info->vq = vq;

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto out_assign;
		}
	}

	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	return vq;

out_assign:
	vring_del_virtqueue(vq);
out_activate_queue:
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
	free_pages_exact(info->queue, size);
out_info:
	kfree(info);
	return ERR_PTR(err);
}

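/*
 * Worked example of the legacy activation arithmetic above (illustrative,
 * assuming VIRTIO_PCI_QUEUE_ADDR_SHIFT == 12): a ring at physical address
 * 0x12345000 is registered by writing page frame number 0x12345 to
 * VIRTIO_PCI_QUEUE_PFN; writing 0 to the same register deactivates the
 * queue, as the error path above and vp_del_vq() below do.
 */
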
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vq->priv;
	unsigned long flags, size;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}

/* the config->del_vqs() implementation */
static void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	struct virtio_pci_vq_info *info;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		info = vq->priv;
		if (vp_dev->per_vq_vectors &&
			info->msix_vector != VIRTIO_MSI_NO_VECTOR)
			free_irq(vp_dev->msix_entries[info->msix_vector].vector,
				 vq);
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	vp_free_vectors(vdev);
}

static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char *names[],
			      bool use_msix,
			      bool per_vq_vectors)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors;

	if (!use_msix) {
		/* Old style: one normal interrupt for change and all vqs. */
		err = vp_request_intx(vdev);
		if (err)
			goto error_request;
	} else {
		if (per_vq_vectors) {
			/* Best option: one for change interrupt, one per vq. */
			nvectors = 1;
			for (i = 0; i < nvqs; ++i)
				if (callbacks[i])
					++nvectors;
		} else {
			/* Second best: one for change, shared for all vqs. */
			nvectors = 2;
		}

		err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
		if (err)
			goto error_request;
	}

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		} else if (!callbacks[i] || !vp_dev->msix_enabled)
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(vp_dev->msix_entries[msix_vec].vector,
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err) {
			vp_del_vq(vqs[i]);
			goto error_find;
		}
	}
	return 0;

error_find:
	vp_del_vqs(vdev);

error_request:
	return err;
}

/* the config->find_vqs() implementation */
static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char *names[])
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
				 true, false);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
				  false, false);
}

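/*
 * Hypothetical caller sketch (not part of this file): a two-queue driver
 * would reach the fallback chain above through the config ops, e.g.
 *
 *	struct virtqueue *vqs[2];
 *	vq_callback_t *cbs[] = { rx_done, tx_done };
 *	const char *names[] = { "rx", "tx" };
 *	int err = vdev->config->find_vqs(vdev, 2, vqs, cbs, names);
 *
 * where rx_done/tx_done are the driver's own callbacks.
 */
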
static const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vq->priv;
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = vp_dev->msix_entries[info->msix_vector].vector;
		if (cpu == -1)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_set_cpu(cpu, mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

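/*
 * Hypothetical caller sketch (not part of this file): a multiqueue driver
 * might spread its queues across CPUs with
 *
 *	for (i = 0; i < nvqs; i++)
 *		virtqueue_set_affinity(vqs[i], i % num_online_cpus());
 *
 * which reaches this function through config->set_vq_affinity.
 */
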
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};

static void virtio_pci_release_dev(struct device *_d)
{
	/*
	 * No need for a release method as we allocate/free
	 * all devices together with the pci devices.
	 * Provide an empty one to avoid getting a warning from core.
	 */
}

/* the PCI probing function */
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int err;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (vp_dev == NULL)
		return -ENOMEM;

	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->vdev.config = &virtio_pci_config_ops;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* Disable MSI/MSIX to bring device to a known good state. */
	pci_msi_off(pci_dev);

	/* enable the device */
	err = pci_enable_device(pci_dev);
	if (err)
		goto out;

	err = pci_request_regions(pci_dev, "virtio-pci");
	if (err)
		goto out_enable_device;

	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (vp_dev->ioaddr == NULL) {
		err = -ENOMEM;
		goto out_req_regions;
	}

	pci_set_drvdata(pci_dev, vp_dev);
	pci_set_master(pci_dev);

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id. this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	/* finally register the virtio device */
	err = register_virtio_device(&vp_dev->vdev);
	if (err)
		goto out_set_drvdata;

	return 0;

out_set_drvdata:
	pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
	pci_release_regions(pci_dev);
out_enable_device:
	pci_disable_device(pci_dev);
out:
	kfree(vp_dev);
	return err;
}

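/*
 * Example of the subsystem-ID mapping above (for reference, not used by the
 * code): a legacy virtio-net device typically appears as PCI 1af4:1000 with
 * subsystem device ID 1 (VIRTIO_ID_NET), so vp_dev->vdev.id.device becomes 1
 * and the virtio-net driver matches it.
 */
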
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	vp_del_vqs(&vp_dev->vdev);
	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	kfree(vp_dev);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);