/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};
struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};
struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	u64 address;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};
static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;
/*
 * Empty page table - Used between
 * mmu_notifier_invalidate_range_start and
 * mmu_notifier_invalidate_range_end
 */
static u64 *empty_page_table;
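/*
 * Note: while an invalidate_range_start/end window is open, the PASID's
 * GCR3 entry is pointed at this zeroed page (see mn_invalidate_range_start
 * below), so device translations miss and are retried via PRI instead of
 * racing with the CPU page-table update that is in progress.
 */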
static void free_pasid_states(struct device_state *dev_state);
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}
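/*
 * Example (illustrative only): a device at bus 0x3a, slot 0x02, function 1
 * has devfn = (0x02 << 3) | 1 = 0x11, so device_id() yields 0x3a11 - the
 * 16-bit PCI requester ID the IOMMU reports in PPR log entries.
 */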
static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}
static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}
static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}
static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}
static void put_device_state_wait(struct device_state *dev_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_dec_and_test(&dev_state->count))
		schedule();
	finish_wait(&dev_state->wq, &wait);

	free_device_state(dev_state);
}
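/*
 * The *_wait variant implements a simple teardown handshake: drop the
 * caller's reference and, if other references remain, sleep on the wait
 * queue until the final put_device_state() wakes us; only then is it safe
 * to free the state.
 */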
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
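/*
 * Sketch of the walk (assuming two levels, i.e. max_pasids > 512):
 * pasid 0x12345 is split into 9-bit indices - level-1 index
 * (0x12345 >> 9) & 0x1ff = 0x91, level-0 index 0x12345 & 0x1ff = 0x145 -
 * exactly like a radix page-table walk with 512 pointers per page.
 */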
static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}
static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}
static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count)) {
		put_device_state(pasid_state->device_state);
		wake_up(&pasid_state->wq);
	}
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

	if (atomic_dec_and_test(&pasid_state->count))
		put_device_state(pasid_state->device_state);
	else
		schedule();

	finish_wait(&pasid_state->wq, &wait);
	free_pasid_state(pasid_state);
}
static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}
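/*
 * Teardown ordering matters here: setting ->invalid (published by the
 * smp_wmb()) stops ppr_notifier() from queueing new faults for this PASID,
 * clearing the GCR3 entry cuts off device access to the mm, and the final
 * flush_workqueue() drains any fault work already queued.
 */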
static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}
static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}
static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}
static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}
static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address)
{
	__mn_flush_page(mn, address);

	return 0;
}
static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}
static void mn_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(empty_page_table));
	}
	pasid_state->mmu_notifier_count += 1;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void mn_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->mmu_notifier_count -= 1;
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(pasid_state->mm->pgd));
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}
static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range_start	= mn_invalidate_range_start,
	.invalidate_range_end	= mn_invalidate_range_end,
};
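/*
 * Each CPU-side page-table event maps to an IOTLB action: young-bit
 * clearing and single-page invalidations flush one page from the device
 * TLB, range invalidations swap in empty_page_table for the duration of
 * the update, and mm teardown (release) unbinds the PASID entirely.
 */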
static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
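/*
 * PRI tag handshake: ppr_notifier() bumps pri[tag].inflight for every
 * request it queues and records whether the device asked for a completion
 * (the "finish" bit in the PPR tag). The PPR response is sent only when
 * the last in-flight fault for that tag completes, after which the slot
 * resets to PPR_SUCCESS for reuse.
 */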
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct page *page;
	int npages, write;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	down_read(&fault->state->mm->mmap_sem);
	npages = get_user_pages(NULL, fault->state->mm,
				fault->address, 1, write, 0, &page, NULL);
	up_read(&fault->state->mm->mmap_sem);

	if (npages == 1) {
		put_page(page);
	} else if (fault->dev_state->inv_ppr_cb) {
		int status;

		status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
						      fault->pasid,
						      fault->address,
						      fault->flags);
		switch (status) {
		case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
			set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
			break;
		case AMD_IOMMU_INV_PRI_RSP_INVALID:
			set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
			break;
		case AMD_IOMMU_INV_PRI_RSP_FAIL:
			set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
			break;
		default:
			BUG();
		}
	} else {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
	}

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}
static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
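/*
 * A minimal bind/unbind sketch (illustrative only, error handling elided):
 * binding attaches the calling task's address space to a PASID, so that
 * subsequent PRI faults from the device are resolved against that mm:
 *
 *	ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *	if (ret)
 *		return ret;
 *	...device issues DMA tagged with this PASID...
 *	amd_iommu_unbind_pasid(pdev, pasid);
 */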
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret != 0)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);
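/*
 * Device setup sketch (illustrative only; the "mydev_*" callbacks are
 * hypothetical): a driver enables IOMMUv2 support once per device, before
 * any PASID is bound, and may optionally register its PPR callbacks:
 *
 *	ret = amd_iommu_init_device(pdev, MYDEV_MAX_PASIDS);
 *	if (ret)
 *		return ret;
 *	amd_iommu_set_invalid_ppr_cb(pdev, mydev_invalid_ppr);
 *	amd_iommu_set_invalidate_ctx_cb(pdev, mydev_invalidate_ctx);
 *	...bind PASIDs, run...
 *	amd_iommu_free_device(pdev);
 */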
void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");

		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	ret = -ENOMEM;
	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (empty_page_table == NULL)
		goto out_destroy_wq;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out_destroy_wq:
	destroy_workqueue(iommu_wq);

out:
	return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);

	free_page((unsigned long)empty_page_table);
}
module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);