/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
#define MAX_DEVICES	0x10000
#define PRI_QUEUE_SIZE	512
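/*
 * A PPR tag is 9 bits wide (see the "& 0x1ff" masking in ppr_notifier()),
 * so at most 512 page requests can be outstanding per PASID; one
 * pri_queue entry tracks the state of each tag.
 */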
struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};
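/*
 * Per-device state: one instance for every PCI device set up through
 * amd_iommu_init_device(). Kept on the global state_list and looked
 * up by the 16-bit PCI device ID.
 */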
struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};
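/*
 * One struct fault is allocated per peripheral page request and queued
 * on the workqueue; do_fault() resolves it in process context.
 */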
struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	u64 address;
	int pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};
static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;
/*
 * Empty page table - Used between
 * mmu_notifier_invalidate_range_start and
 * mmu_notifier_invalidate_range_end
 */
static u64 *empty_page_table;
static void free_pasid_states(struct device_state *dev_state);
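/*
 * The device ID is the 16-bit PCI requester ID: bus number in the high
 * byte, devfn in the low byte.
 */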
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}
static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}
static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}
static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}
static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}
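/*
 * Drop our reference and wait until the reference count reaches zero
 * before freeing the device state.
 */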
static void put_device_state_wait(struct device_state *dev_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_dec_and_test(&dev_state->count))
		schedule();
	finish_wait(&dev_state->wq, &wait);

	free_device_state(dev_state);
}
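/*
 * PASID states live in a radix-tree-like table: each level indexes
 * nine bits of the PASID, so every table node is one page holding 512
 * pointers. With pasid_levels == 0 the table is a single leaf page.
 */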
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}
static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}
static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_dec_and_test(&pasid_state->count))
		schedule();
	finish_wait(&pasid_state->wq, &wait);
	free_pasid_state(pasid_state);
}
static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}
static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}
static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}
static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}
static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}
static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}
static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}
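/*
 * While a range invalidation is in flight the device must not walk the
 * (possibly stale) process page table, so GCR3 is pointed at the empty
 * page table for the duration; device accesses then fault instead of
 * using a stale translation. The nesting count handles overlapping
 * invalidations.
 */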
static void mn_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(empty_page_table));
	}
	pasid_state->mmu_notifier_count += 1;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void mn_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->mmu_notifier_count -= 1;
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(pasid_state->mm->pgd));
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}
static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range_start	= mn_invalidate_range_start,
	.invalidate_range_end	= mn_invalidate_range_end,
};
static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
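/*
 * A PPR completion is sent to the hardware only when the last in-flight
 * fault for this tag finishes and the peripheral asked for a response
 * (the finish bit was set in the original request).
 */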
static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
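/*
 * Worker for one queued fault: try to resolve it by faulting the page
 * in with get_user_pages(); on failure, let the device driver pick the
 * PPR response through its invalid-PPR callback, if one is registered.
 */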
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct page *page;
	int npages, write;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	down_read(&fault->state->mm->mmap_sem);
	npages = get_user_pages(NULL, fault->state->mm,
				fault->address, 1, write, 0, &page, NULL);
	up_read(&fault->state->mm->mmap_sem);

	if (npages == 1) {
		put_page(page);
	} else if (fault->dev_state->inv_ppr_cb) {
		int status;

		status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
						      fault->pasid,
						      fault->address,
						      fault->flags);
		switch (status) {
		case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
			set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
			break;
		case AMD_IOMMU_INV_PRI_RSP_INVALID:
			set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
			break;
		case AMD_IOMMU_INV_PRI_RSP_FAIL:
			set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
			break;
		default:
			BUG();
		}
	} else {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
	}

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
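	/*
	 * The low nine bits of the PPR tag identify the request; bit 9
	 * tells us whether the peripheral expects a completion message.
	 */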
	tag    = iommu_fault->tag & 0x1ff;
	finish = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}
static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};
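/*
 * Bind the task's address space to a specific PASID of the device. The
 * registered mmu_notifier keeps the device's GCR3 entry in sync with
 * the life cycle of the mm_struct.
 */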
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;
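	/*
	 * Compute how many nine-bit table levels are needed to cover the
	 * requested number of PASIDs: up to 512 fit into a single leaf
	 * page (level 0); each extra level multiplies that by 512.
	 */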
	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);
void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
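/*
 * The invalid-PPR callback gives the device driver a chance to handle
 * faults the kernel could not resolve; its return value selects the
 * PPR response sent back to the device (see do_fault() above).
 */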
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	ret = -ENOMEM;
	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (empty_page_table == NULL)
		goto out_destroy_wq;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out_destroy_wq:
	destroy_workqueue(iommu_wq);

out:
	return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);

	free_page((unsigned long)empty_page_table);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);