/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/mmu_context.h>
#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);
static long try_increment_locked_vm(long npages)
{
	long ret = 0, locked, lock_limit;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	down_write(&current->mm->mmap_sem);
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		current->mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}
static void decrement_locked_vm(long npages)
{
	if (!current || !current->mm || !npages)
		return; /* process exited */

	down_write(&current->mm->mmap_sem);
	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
		npages = current->mm->locked_vm;
	current->mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&current->mm->mmap_sem);
}
/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU.
 */
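/*
 * A minimal sketch of the expected userspace flow (illustrative only; the
 * group number, buffer address and sizes below are made up):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)buf,		// page-aligned user buffer
 *		.iova  = info.dma32_window_start,
 *		.size  = 0x10000,
 *	};
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */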
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};
/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	unsigned long locked_pages;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
};
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	return mm_iommu_put(mem);
}
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	ret = mm_iommu_get(vaddr, entries, &mem);
	if (ret)
		return ret;

	container->enabled = true;

	return 0;
}
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}
static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}
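/*
 * For example, assuming 4K system pages (PAGE_SHIFT == 12): a TCE table with
 * it_page_shift == 16 (64K IOMMU pages) only accepts pages that are part of a
 * compound page of order 4 or higher, so the 64K reachable by the device is
 * entirely backed by the pinned page.
 */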
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}
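/*
 * Worked example for tce_iommu_find_table() above: for a window with
 * it_page_shift == 12, it_offset == 0 and it_size == 0x100000 (4GB of 4K
 * TCEs), ioba 0x40000000 converts to entry 0x40000 which falls inside
 * [0, 0x100000), so that table and its index are returned.
 */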
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
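/*
 * Worked example for the accounting above: a group with a 2GB default 32-bit
 * DMA window and 4K system pages charges 2GB >> 12 = 524288 pages against
 * RLIMIT_MEMLOCK when the container is enabled, no matter how much of the
 * window ever gets mapped.
 */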
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	decrement_locked_vm(container->locked_pages);
}
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(tbl);
	}

	tce_iommu_disable(container);
	mutex_destroy(&container->lock);
	kfree(container);
}
static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}
static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}
static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
		unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua || !current || !current->mm)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}
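/*
 * Pin one page of the userspace buffer with get_user_pages_fast() and return
 * its host physical address; the permission bits encoded in @tce decide
 * whether the page must be pinned for write access.
 */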
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
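/*
 * tce_iommu_build_v2() below is the v2 counterpart of tce_iommu_build():
 * instead of pinning pages here, userspace addresses are translated through
 * memory preregistered with VFIO_IOMMU_SPAPR_REGISTER_MEMORY, and the
 * userspace address is cached in the table's userspace view so the reference
 * can be dropped again at unmap time.
 */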
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
				&hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	if (!ret && container->v2) {
		ret = tce_iommu_userspace_view_alloc(*ptbl);
		if (ret)
			(*ptbl)->it_ops->free(*ptbl);
	}

	if (ret)
		decrement_locked_vm(table_size >> PAGE_SHIFT);

	return ret;
}
static void tce_iommu_free_table(struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(pages);
}
static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
: {
651 struct vfio_iommu_type1_dma_map param
;
652 struct iommu_table
*tbl
= NULL
;
654 enum dma_data_direction direction
;
656 if (!container
->enabled
)
659 minsz
= offsetofend(struct vfio_iommu_type1_dma_map
, size
);
661 if (copy_from_user(¶m
, (void __user
*)arg
, minsz
))
664 if (param
.argsz
< minsz
)
667 if (param
.flags
& ~(VFIO_DMA_MAP_FLAG_READ
|
668 VFIO_DMA_MAP_FLAG_WRITE
))
671 num
= tce_iommu_find_table(container
, param
.iova
, &tbl
);
675 if ((param
.size
& ~IOMMU_PAGE_MASK(tbl
)) ||
676 (param
.vaddr
& ~IOMMU_PAGE_MASK(tbl
)))
679 /* iova is checked by the IOMMU API */
680 if (param
.flags
& VFIO_DMA_MAP_FLAG_READ
) {
681 if (param
.flags
& VFIO_DMA_MAP_FLAG_WRITE
)
682 direction
= DMA_BIDIRECTIONAL
;
684 direction
= DMA_TO_DEVICE
;
686 if (param
.flags
& VFIO_DMA_MAP_FLAG_WRITE
)
687 direction
= DMA_FROM_DEVICE
;
692 ret
= iommu_tce_put_param_check(tbl
, param
.iova
, param
.vaddr
);
697 ret
= tce_iommu_build_v2(container
, tbl
,
698 param
.iova
>> tbl
->it_page_shift
,
700 param
.size
>> tbl
->it_page_shift
,
703 ret
= tce_iommu_build(container
, tbl
,
704 param
.iova
>> tbl
->it_page_shift
,
706 param
.size
>> tbl
->it_page_shift
,
709 iommu_flush_tce(tbl
);
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;
	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	}

	return -ENOTTY;
}
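/*
 * The helpers below transfer control of a group's DMA windows between the
 * platform and the container. tce_iommu_take_ownership()/_release_ownership()
 * reuse the tables the platform has already created for the group; the _ddw
 * variants further down are for IOMMU groups that implement the dynamic DMA
 * window ops and have the container create the windows itself.
 */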
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}
static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = tce_iommu_userspace_view_alloc(tbl);
		if (!rc)
			rc = iommu_take_ownership(tbl);

		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}
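/*
 * DDW variants: on groups providing create_table/set_window ops the container
 * owns the tables itself, so taking ownership also creates the default 32-bit
 * window if no group was attached before, and releasing ownership unsets all
 * windows before handing the group back to the platform.
 */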
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;
	struct iommu_table *tbl = NULL;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/*
	 * If this is the first group attached, check if there is
	 * a default DMA window and create one if none as
	 * the userspace expects it to exist.
	 */
	if (!tce_groups_attached(container) && !container->tables[0]) {
		ret = tce_iommu_create_table(container,
				table_group,
				0, /* window number */
				IOMMU_PAGE_SHIFT_4K,
				table_group->tce32_size,
				1, /* default levels */
				&tbl);
		if (ret)
			goto release_exit;

		container->tables[0] = tbl;
	}

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		tbl = container->tables[i];

		if (!tbl)
			continue;

		/* Set the default window to a new group */
		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops != table_group->ops) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)
		ret = tce_iommu_take_ownership(container, table_group);
	else
		ret = tce_iommu_take_ownership_ddw(container, table_group);

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}
const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};
static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}
static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}
module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);