/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
	struct class		*class;
	struct list_head	iommu_drivers_list;
	struct mutex		iommu_drivers_lock;
	struct list_head	group_list;
	struct idr		group_idr;
	struct mutex		group_lock;
	struct cdev		group_cdev;
	struct device		*dev;
	dev_t			devt;
	struct cdev		cdev;
	wait_queue_head_t	release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct mutex			group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};

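/*
 * Object model: a container (/dev/vfio/vfio) holds one or more groups
 * (/dev/vfio/$GROUP), and each group holds one or more devices.  Every
 * level above is kref-counted, a device reference implies a group
 * reference, and a group in use holds a container reference, so the
 * file descriptors may be closed in any order.
 */
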
/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);

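/*
 * Illustrative sketch (not part of this file): a minimal iommu backend
 * registering with the two functions above.  The "example_" names are
 * hypothetical; vfio_iommu_type1, requested at module init below, is
 * the in-tree implementation.  Only ops this file actually invokes are
 * filled in.
 */
#if 0
static const struct vfio_iommu_driver_ops example_iommu_ops = {
	.owner		= THIS_MODULE,
	.open		= example_open,		/* returns iommu_data */
	.release	= example_release,
	.ioctl		= example_ioctl,	/* must answer VFIO_CHECK_EXTENSION */
	.attach_group	= example_attach_group,
	.detach_group	= example_detach_group,
};

static int __init example_init(void)
{
	return vfio_register_iommu_driver(&example_iommu_ops);
}

static void __exit example_exit(void)
{
	vfio_unregister_iommu_driver(&example_iommu_ops);
}
#endif
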
/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	/* index 0 is used by /dev/vfio/vfio */
	return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	atomic_set(&group->container_users, 0);
	group->iommu_group = iommu_group;

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_free_group_minor(minor);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.devt), minor),
			    group, "%d", iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return (struct vfio_group *)dev; /* ERR_PTR */
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);

	WARN_ON(!list_empty(&group->device_list));

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

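/*
 * For reference, kref_put_mutex() above behaves roughly like the sketch
 * below (see include/linux/kref.h of this era): the mutex is taken only
 * when the count may reach zero, and the release callback runs with the
 * mutex held, which is why vfio_group_release() ends in
 * vfio_group_unlock_and_free().
 */
#if 0
	if (!atomic_add_unless(&kref->refcount, -1, 1)) {
		mutex_lock(lock);
		if (!atomic_dec_and_test(&kref->refcount)) {
			mutex_unlock(lock);
			return;
		}
		release(kref);	/* called with lock held; must unlock */
	}
#endif
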
/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;
	int ret;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;

	ret = dev_set_drvdata(dev, device);
	if (ret) {
		kfree(device);
		return ERR_PTR(ret);
	}

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
static void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}

/*
 * Whitelist some drivers that we know are safe (no dma) or just sit on
 * a device.  It's not always practical to leave a device within a group
 * driverless as it could get re-bound to something unsafe.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub", "pcieport" };

static bool vfio_whitelisted_driver(struct device_driver *drv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
		if (!strcmp(drv->name, vfio_driver_whitelist[i]))
			return true;
	}

	return false;
}

/*
 * A vfio group is viable for use by userspace if all devices are either
 * driver-less or bound to a vfio or whitelisted driver.  We test the
 * latter by the existence of a struct vfio_device matching the dev.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = ACCESS_ONCE(dev->driver);

	if (!drv || vfio_whitelisted_driver(drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return -EINVAL;
}

/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	WARN("Device %s added to live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/*
	 * Expect to fall out here.  If a device was in use, it would
	 * have been bound to a vfio sub-driver, which would have blocked
	 * in .remove at vfio_del_group_dev.  Sanity check that we no
	 * longer track the device, so it's safe to remove.
	 */
	device = vfio_group_get_device(group, dev);
	if (likely(!device))
		return 0;

	WARN("Device %s removed from live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	vfio_device_put(device);
	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;

	/*
	 * Need to go through a group_lock lookup to get a reference or
	 * we risk racing a group being removed.  Leave a WARN_ON for
	 * debugging, but if the group no longer exists, a spurious notify
	 * is harmless.
	 */
	group = vfio_group_try_get(group);
	if (WARN_ON(!group))
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		vfio_group_nb_del_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		pr_debug("%s: Device %s, group %d binding to driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		pr_debug("%s: Device %s, group %d bound to driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		pr_debug("%s: Device %s, group %d unbound from driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */
		break;
	}

	vfio_group_put(group);
	return NOTIFY_OK;
}

/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		WARN(1, "Device %s already exists on group %d\n",
		     dev_name(dev), iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		iommu_group_put(iommu_group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		iommu_group_put(iommu_group);
		return PTR_ERR(device);
	}

	/*
	 * Added device holds reference to iommu_group and vfio_device
	 * (which in turn holds reference to vfio_group).  Drop extra
	 * group reference used while acquiring device.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);

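/*
 * Illustrative sketch (not part of this file): how a hypothetical bus
 * driver, in the mold of vfio-pci, plugs a device into vfio from its
 * probe routine.  All "example_" names are assumptions; only callbacks
 * this file actually invokes are shown.
 */
#if 0
struct example_device {
	struct device *dev;
};

static const struct vfio_device_ops example_vfio_ops = {
	.open		= example_open,
	.release	= example_release,
	.ioctl		= example_ioctl,
	.read		= example_read,
	.write		= example_write,
	.mmap		= example_mmap,
};

static int example_probe(struct device *dev)
{
	struct example_device *edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	int ret;

	if (!edev)
		return -ENOMEM;

	edev->dev = dev;

	/* device_data comes back as the argument to every callback above */
	ret = vfio_add_group_dev(dev, &example_vfio_ops, edev);
	if (ret)
		kfree(edev);
	return ret;
}
#endif
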
/* Given a referenced group, check if it contains the device */
static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	device = vfio_group_get_device(group, dev);
	if (!device)
		return false;

	vfio_device_put(device);
	return true;
}

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	struct iommu_group *iommu_group = group->iommu_group;
	void *device_data = device->device_data;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	vfio_device_put(device);

	/* TODO send a signal to encourage this to be released */
	wait_event(vfio.release_q, !vfio_dev_present(group, dev));

	vfio_group_put(group);

	iommu_group_put(iommu_group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);

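/*
 * Continuing the sketch above: the matching remove routine.  The
 * blocking behaviour is the point -- vfio_del_group_dev() does not
 * return until every open file descriptor for the device is released.
 */
#if 0
static int example_remove(struct device *dev)
{
	struct example_device *edev = vfio_del_group_dev(dev);

	kfree(edev);
	return 0;
}
#endif
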
/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver = container->iommu_driver;
	long ret = 0;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {
				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	return ret;
}

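/*
 * Illustrative userspace sketch (not part of this file): the container
 * ioctls above as a minimal user sees them, assuming the type1 backend.
 * The later sketches below continue this one and share its headers.
 */
#if 0
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int example_open_container(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);

	if (container < 0)
		err(1, "open /dev/vfio/vfio");

	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
		errx(1, "unknown VFIO API version");

	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
		errx(1, "VFIO_TYPE1_IOMMU not supported");

	return container;
}
#endif
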
/* hold container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	mutex_lock(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		mutex_unlock(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		/* module reference holds the driver we're working on */
		mutex_unlock(&vfio.iommu_drivers_lock);

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			goto skip_drivers_unlock;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (!ret) {
			container->iommu_driver = driver;
			container->iommu_data = data;
		} else {
			driver->ops->release(data);
			module_put(driver->ops->owner);
		}

		goto skip_drivers_unlock;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
skip_drivers_unlock:
	mutex_unlock(&container->group_lock);

	return ret;
}

static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	driver = container->iommu_driver;
	data = container->iommu_data;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	mutex_init(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver = container->iommu_driver;

	if (unlikely(!driver || !driver->ops->read))
		return -EINVAL;

	return driver->ops->read(container->iommu_data, buf, count, ppos);
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver = container->iommu_driver;

	if (unlikely(!driver || !driver->ops->write))
		return -EINVAL;

	return driver->ops->write(container->iommu_data, buf, count, ppos);
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver = container->iommu_driver;

	if (unlikely(!driver || !driver->ops->mmap))
		return -EINVAL;

	return driver->ops->mmap(container->iommu_data, vma);
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};

/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	mutex_lock(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	mutex_unlock(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	mutex_lock(&container->group_lock);

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	mutex_unlock(&container->group_lock);
	fdput(f);
	return ret;
}

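/*
 * Continuing the userspace sketch: attaching a group and enabling the
 * iommu.  Note the ordering vfio_ioctl_set_iommu() enforces -- at least
 * one group must be attached before VFIO_SET_IOMMU succeeds.  The group
 * path is an example.
 */
#if 0
static int example_attach_group(int container, const char *path)
{
	struct vfio_group_status status = { .argsz = sizeof(status) };
	int group = open(path, O_RDWR);		/* e.g. "/dev/vfio/26" */

	if (group < 0)
		err(1, "open %s", path);

	if (ioctl(group, VFIO_GROUP_GET_STATUS, &status))
		err(1, "VFIO_GROUP_GET_STATUS");
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		errx(1, "group not viable; bind all member devices to vfio");

	if (ioctl(group, VFIO_GROUP_SET_CONTAINER, &container))
		err(1, "VFIO_GROUP_SET_CONTAINER");

	if (ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU))
		err(1, "VFIO_SET_IOMMU");

	return group;
}
#endif
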
static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret = -ENODEV;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (strcmp(dev_name(device->dev), buf))
			continue;

		ret = device->ops->open(device->device_data);
		if (ret)
			break;
		/*
		 * We can't use anon_inode_getfd() because we need to modify
		 * the f_mode flags directly to allow more than just ioctls
		 */
		ret = get_unused_fd();
		if (ret < 0) {
			device->ops->release(device->device_data);
			break;
		}

		filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
					   device, O_RDWR);
		if (IS_ERR(filep)) {
			put_unused_fd(ret);
			ret = PTR_ERR(filep);
			device->ops->release(device->device_data);
			break;
		}

		/*
		 * TODO: add an anon_inode interface to do this.
		 * Appears to be missing by lack of need rather than
		 * explicitly prevented.  Now there's need.
		 */
		filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

		vfio_device_get(device);
		atomic_inc(&group->container_users);

		fd_install(ret, filep);
		break;
	}
	mutex_unlock(&group->device_lock);

	return ret;
}

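/*
 * Continuing the userspace sketch: the string passed to
 * VFIO_GROUP_GET_DEVICE_FD is matched against dev_name() above; the
 * PCI address used here is an example.
 */
#if 0
static int example_get_device(int group)
{
	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");

	if (device < 0)
		err(1, "VFIO_GROUP_GET_DEVICE_FD");

	return device;
}
#endif
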
static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->container) {
		vfio_group_put(group);
		return -EBUSY;
	}

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};

/**
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
					  unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_device_fops_compat_ioctl,
#endif
	.mmap		= vfio_device_fops_mmap,
};

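/*
 * Ending the userspace sketch: every operation on the device fd lands
 * in the bus driver's vfio_device_ops via the fops above.
 * VFIO_DEVICE_GET_INFO is one ioctl such drivers answer.
 */
#if 0
static void example_query_device(int device)
{
	struct vfio_device_info info = { .argsz = sizeof(info) };

	if (ioctl(device, VFIO_DEVICE_GET_INFO, &info))
		err(1, "VFIO_DEVICE_GET_INFO");

	printf("regions: %u, irqs: %u\n", info.num_regions, info.num_irqs);
}
#endif
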
/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.devt, 0, MINORMASK, "vfio");
	if (ret)
		goto err_base_chrdev;

	cdev_init(&vfio.cdev, &vfio_fops);
	ret = cdev_add(&vfio.cdev, vfio.devt, 1);
	if (ret)
		goto err_base_cdev;

	vfio.dev = device_create(vfio.class, NULL, vfio.devt, NULL, "vfio");
	if (IS_ERR(vfio.dev)) {
		ret = PTR_ERR(vfio.dev);
		goto err_base_dev;
	}

	/* /dev/vfio/$GROUP */
	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev,
		       MKDEV(MAJOR(vfio.devt), 1), MINORMASK - 1);
	if (ret)
		goto err_groups_cdev;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

	/*
	 * Attempt to load known iommu-drivers.  This gives us a working
	 * environment without the user needing to explicitly load iommu
	 * drivers.
	 */
	request_module_nowait("vfio_iommu_type1");

	return 0;

err_groups_cdev:
	device_destroy(vfio.class, vfio.devt);
err_base_dev:
	cdev_del(&vfio.cdev);
err_base_cdev:
	unregister_chrdev_region(vfio.devt, MINORMASK);
err_base_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	return ret;
}

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	device_destroy(vfio.class, vfio.devt);
	cdev_del(&vfio.cdev);
	unregister_chrdev_region(vfio.devt, MINORMASK);
	class_destroy(vfio.class);
	vfio.class = NULL;
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);