/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	atomic_t mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct task_struct *task;		/* Task bound to this PASID */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	spinlock_t lock;			/* Protect pri_queues */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	u64 address;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

/*
 * Empty page table - Used between
 * mmu_notifier_invalidate_range_start and
 * mmu_notifier_invalidate_range_end
 */
static u64 *empty_page_table;

static void free_pasid_states(struct device_state *dev_state);
static void unbind_pasid(struct device_state *dev_state, int pasid);

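/*
 * device_id() below packs the PCI requester ID: the bus number ends up
 * in the high byte, devfn in the low byte. Illustrative (hypothetical)
 * example: bus 0x3a, devfn 0x08 gives devid 0x3a08.
 */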
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

static void put_device_state_wait(struct device_state *dev_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_dec_and_test(&dev_state->count))
		schedule();
	finish_wait(&dev_state->wq, &wait);

	free_device_state(dev_state);
}

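/*
 * The PASID states of a device are kept in a page-table-like radix tree:
 * each level resolves 9 bits of the PASID, so one table page holds 512
 * entries. Illustrative example: with pasid_levels == 1, PASID 0x12345
 * is found via index (0x12345 >> 9) & 0x1ff in the top-level table and
 * index 0x12345 & 0x1ff in the leaf table.
 */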
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL || *ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count)) {
		put_device_state(pasid_state->device_state);
		wake_up(&pasid_state->wq);
	}
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

	if (atomic_dec_and_test(&pasid_state->count))
		put_device_state(pasid_state->device_state);
	else
		schedule();

	finish_wait(&pasid_state->wq, &wait);
	mmput(pasid_state->mm);
	free_pasid_state(pasid_state);
}

static void __unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
	clear_pasid_state(pasid_state->device_state, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);

	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state(pasid_state); /* Reference taken in bind() function */
}

static void unbind_pasid(struct device_state *dev_state, int pasid)
{
	struct pasid_state *pasid_state;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		return;

	__unbind_pasid(pasid_state);
	put_pasid_state_wait(pasid_state); /* Reference taken in this function */
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address)
{
	__mn_flush_page(mn, address);

	return 0;
}

static void mn_change_pte(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address,
			  pte_t pte)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

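/*
 * While a range invalidation is running, the PASID's GCR3 base is
 * switched to empty_page_table so the device cannot use translations
 * that are about to go away. The real page table (mm->pgd) is restored
 * in mn_invalidate_range_end once the last nested notifier call is done.
 */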
static void mn_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(empty_page_table));
	}
}

static void mn_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(pasid_state->mm->pgd));
	}
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	might_sleep();

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if (pasid_state->device_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(dev_state, pasid_state->pasid);
}

static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.change_pte		= mn_change_pte,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range_start	= mn_invalidate_range_start,
	.invalidate_range_end	= mn_invalidate_range_end,
};

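/*
 * PRI tag handling: pri[tag].inflight counts the faults still in flight
 * for a tag; finish_pri_tag() sends the PPR completion to the device
 * only when the last of them is done and the hardware requested a
 * response for the tag (pri[tag].finish).
 */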
static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	int npages, write;
	struct page *page;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	down_read(&fault->state->mm->mmap_sem);
	npages = get_user_pages(fault->state->task, fault->state->mm,
				fault->address, 1, write, 0, &page, NULL);
	up_read(&fault->state->mm->mmap_sem);

	if (npages == 1) {
		put_page(page);
	} else if (fault->dev_state->inv_ppr_cb) {
		int status;

		status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
						      fault->state->pasid,
						      fault->address,
						      fault->flags);
		switch (status) {
		case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
			set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
			break;
		case AMD_IOMMU_INV_PRI_RSP_INVALID:
			set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
			break;
		case AMD_IOMMU_INV_PRI_RSP_FAIL:
			set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
			break;
		default:
			BUG();
		}
	} else {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
	}

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

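/*
 * ppr_notifier() runs for every peripheral page request reported by the
 * IOMMU: the low 9 bits of iommu_fault->tag carry the PRI tag and bit 9
 * signals whether the device expects a completion ("finish") for the tag,
 * as the masking below implies.
 */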
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

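/*
 * Minimal usage sketch for the bind/unbind API (hypothetical driver
 * code, error handling elided; the device must have been set up with
 * amd_iommu_init_device() first):
 *
 *	ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *	if (ret)
 *		return ret;
 *	...
 *	amd_iommu_unbind_pasid(pdev, pasid);
 */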
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	atomic_set(&pasid_state->mmu_notifier_count, 0);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	pasid_state->task         = task;
	pasid_state->mm           = get_task_mm(task);
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, pasid_state->mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* This will call the mn_release function and unbind the PASID */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

out:
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

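/*
 * Device setup/teardown sketch (hypothetical driver code): announce
 * PASID support once per device before binding address spaces, and free
 * the state when done. The PASID count of 16 is an arbitrary example.
 *
 *	ret = amd_iommu_init_device(pdev, 16);
 *	if (ret)
 *		return ret;
 *	...
 *	amd_iommu_free_device(pdev);
 */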
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

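	/*
	 * Compute the number of PASID table levels: the leaf level covers
	 * 512 PASIDs (9 bits), each additional level resolves another 9
	 * bits. Worked example: pasids == 0x10000 gives pasid_levels == 1.
	 */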
	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret != 0)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

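/*
 * The invalid-PPR callback lets the device driver decide how a page
 * request that the kernel could not resolve is answered: it returns one
 * of the AMD_IOMMU_INV_PRI_RSP_* codes, which do_fault() above maps to
 * the PPR status sent back to the device.
 */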
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	ret = -ENOMEM;
	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (empty_page_table == NULL)
		goto out_destroy_wq;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out_destroy_wq:
	destroy_workqueue(iommu_wq);

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);

	free_page((unsigned long)empty_page_table);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);