vfio: powerpc/spapr: Register memory and define IOMMU v2
deliverable/linux.git: drivers/vfio/vfio_iommu_spapr_tce.c
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp. All rights reserved.
 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

static long try_increment_locked_vm(long npages)
{
	long ret = 0, locked, lock_limit;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if (!npages)
		return 0;

	down_write(&current->mm->mmap_sem);
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		current->mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(long npages)
{
	if (!current || !current->mm || !npages)
		return; /* process exited */

	down_write(&current->mm->mmap_sem);
	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
		npages = current->mm->locked_vm;
	current->mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&current->mm->mmap_sem);
}
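
/*
 * Note: the two helpers above account all memory locked on behalf of
 * userspace; e.g. tce_iommu_enable() below charges the whole 32-bit DMA
 * window up front and tce_iommu_disable() returns it, and table and
 * userspace-view allocations are accounted the same way.
 */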

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
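
/*
 * An illustrative sketch (not normative; the fds and "buf" are
 * hypothetical) of the v1 call sequence userspace is expected to follow
 * against this driver; a v2 container preregisters memory instead of
 * calling VFIO_IOMMU_ENABLE (see tce_iommu_register_pages() below):
 *
 *	ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd);
 *	ioctl(container_fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container_fd, VFIO_IOMMU_ENABLE);
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(unsigned long)buf,
 *		.iova = info.dma32_window_start,
 *		.size = 0x1000,
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 */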

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * The container descriptor is not supplied with an IOMMU group at the
 * moment of initialization, as the API requires; groups (which must share
 * the same table_group ops) are attached later and kept in group_list.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	unsigned long locked_pages;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
};

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	return mm_iommu_put(mem);
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	ret = mm_iommu_get(vaddr, entries, &mem);
	if (ret)
		return ret;

	container->enabled = true;

	return 0;
}
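
/*
 * A minimal userspace sketch (illustrative only; "buf" and "size" are
 * assumed to be a page-aligned allocation owned by the caller) of v2
 * memory preregistration and its teardown:
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.flags = 0,
 *		.vaddr = (__u64)(unsigned long)buf,
 *		.size = size,
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *	...
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
 */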

static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}
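
/*
 * Worked example (illustrative numbers): with it_page_shift = 12 (4K
 * IOMMU pages), it_offset = 0 and it_size = 0x100000 entries (a 4GB
 * window), ioba 0x3000 yields entry 3, which falls within [0, 0x100000)
 * and so selects that table.
 */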

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (!current->mm)
		return -ESRCH; /* process exited */

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult, if not
	 * impossible, to handle the accounting purely in real mode.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we do not have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	if (!current->mm)
		return;

	decrement_locked_vm(container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(tbl);
	}

	tce_iommu_disable(container);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}
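
/*
 * The put_page() above drops the reference taken by get_user_pages_fast()
 * in tce_iommu_use_page() below; each TCE programmed by the v1 path holds
 * one such reference until the entry is cleared or replaced.
 */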

static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
		unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua || !current || !current->mm)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
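
/*
 * For example (illustrative numbers), a VFIO_IOMMU_MAP_DMA request of
 * size 1MB at iova 0 against a 4K table reaches tce_iommu_build() as
 * entry = 0 and pages = 256; each iteration pins one page and programs
 * one TCE.
 */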

static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
				&hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
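
/*
 * Unlike tce_iommu_build(), the v2 path above never calls
 * get_user_pages_fast() on the hot path: it translates userspace
 * addresses through memory preregistered (i.e. already pinned and
 * accounted) via VFIO_IOMMU_SPAPR_REGISTER_MEMORY, which is what helps
 * keep real-mode TCE updates feasible.
 */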

static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	if (!ret && container->v2) {
		ret = tce_iommu_userspace_view_alloc(*ptbl);
		if (ret)
			(*ptbl)->it_ops->free(*ptbl);
	}

	if (ret)
		decrement_locked_vm(table_size >> PAGE_SHIFT);

	return ret;
}

static void tce_iommu_free_table(struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(pages);
}

static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;

	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flags are supported now */
		if (param.flags)
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flags are supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flags are supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	}

	return -ENOTTY;
}

static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = tce_iommu_userspace_view_alloc(tbl);
		if (!rc)
			rc = iommu_take_ownership(tbl);

		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;
	struct iommu_table *tbl = NULL;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/*
	 * If this is the first group attached, check if there is
	 * a default DMA window and create one if there is none,
	 * as userspace expects it to exist.
	 */
	if (!tce_groups_attached(container) && !container->tables[0]) {
		ret = tce_iommu_create_table(container,
				table_group,
				0, /* window number */
				IOMMU_PAGE_SHIFT_4K,
				table_group->tce32_size,
				1, /* default levels */
				&tbl);
		if (ret)
			goto release_exit;
		else
			container->tables[0] = tbl;
	}

	/* Set all windows of the container to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
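
/*
 * Two ownership models coexist above: tce_iommu_take_ownership() grabs
 * the preexisting, firmware-created tables of a group, while
 * tce_iommu_take_ownership_ddw() serves hosts with dynamic DMA windows,
 * where VFIO creates the default 32-bit window itself;
 * tce_iommu_attach_group() below picks one based on which table_group
 * ops the platform provides.
 */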

static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL, *tcegrp_iter;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/*
	 * Check if new group has the same iommu_ops (i.e. compatible).
	 * Iterate with a separate cursor so that tcegrp stays NULL and
	 * the error path below never frees a live list entry.
	 */
	list_for_each_entry(tcegrp_iter, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp_iter->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp_iter->grp);
		if (table_group_tmp->ops != table_group->ops) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp_iter->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)
		ret = tce_iommu_take_ownership(container, table_group);
	else
		ret = tce_iommu_take_ownership_ddw(container, table_group);

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);