arch/x86/kvm/iommu.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/iommu.h>
#include "assigned-dev.h"

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
                   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);

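/*
 * Pin a contiguous run of @npages guest frames starting at @gfn and return
 * the host pfn of the first page.  Each gfn_to_pfn_memslot() call takes a
 * page reference that is dropped later, one 4k page at a time, by
 * kvm_unpin_pages().  An error/noslot pfn is returned unchanged so the
 * caller can skip the range.
 */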
static kvm_pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                               unsigned long npages)
{
        gfn_t end_gfn;
        kvm_pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(slot, gfn);
        end_gfn = gfn + npages;
        gfn    += 1;

        if (is_error_noslot_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(slot, gfn++);

        return pfn;
}

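/* Drop the reference taken by kvm_pin_pages() on each of @npages pages. */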
static void kvm_unpin_pages(struct kvm *kvm, kvm_pfn_t pfn,
                            unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}

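/*
 * Map every page of @slot into the VM's IOMMU domain at its guest physical
 * address, using the largest host page size that still fits in the memslot
 * and is aligned on both gfn and hva.  Pages are pinned before mapping
 * because teardown later unmaps and unpins in 4k steps.
 */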
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        kvm_pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        gfn     = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ;
        if (!(slot->flags & KVM_MEM_READONLY))
                flags |= IOMMU_WRITE;
        if (!kvm->arch.iommu_noncoherent)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /* Make sure hva is aligned to the page size we want to map */
                while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
                if (is_error_noslot_pfn(pfn)) {
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              page_size, flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%llx\n", pfn);
                        kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;

                cond_resched();
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
        return r;
}

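/* Walk all memslots under SRCU and map each one into the IOMMU domain. */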
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int idx, r = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        if (kvm->arch.iommu_noncoherent)
                kvm_arch_register_noncoherent_dma(kvm);

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots) {
                r = kvm_iommu_map_pages(kvm, memslot);
                if (r)
                        break;
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return r;
}

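/*
 * Attach @pdev to the VM's IOMMU domain.  If attaching the device changes
 * whether the domain is cache-coherent, the existing IOMMU mappings are
 * rebuilt with or without IOMMU_CACHE to match.
 */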
int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r;
        bool noncoherent;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
                return r;
        }

        noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

        /* Check if need to update IOMMU page table for guest memory */
        if (noncoherent != kvm->arch.iommu_noncoherent) {
                kvm_iommu_unmap_memslots(kvm);
                kvm->arch.iommu_noncoherent = noncoherent;
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        kvm_arch_start_assignment(kvm);
        pci_set_dev_assigned(pdev);

        dev_info(&pdev->dev, "kvm assign device\n");

        return 0;
out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

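/* Detach @pdev from the VM's IOMMU domain and drop the assignment marking. */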
int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        pci_clear_dev_assigned(pdev);
        kvm_arch_end_assignment(kvm);

        dev_info(&pdev->dev, "kvm deassign device\n");

        return 0;
}

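/*
 * Allocate the VM's IOMMU domain and map all current memslots into it.
 * Assignment is refused (-EPERM) when the platform lacks interrupt
 * remapping, unless the allow_unsafe_assigned_interrupts module parameter
 * is set.
 */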
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_present(&pci_bus_type)) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        mutex_lock(&kvm->slots_lock);

        kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
        if (!kvm->arch.iommu_domain) {
                r = -ENOMEM;
                goto out_unlock;
        }

        if (!allow_unsafe_assigned_interrupts &&
            !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
                printk(KERN_WARNING "%s: No interrupt remapping support,"
                       " disallowing device assignment."
                       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
                       " module option.\n", __func__);
                iommu_domain_free(kvm->arch.iommu_domain);
                kvm->arch.iommu_domain = NULL;
                r = -EPERM;
                goto out_unlock;
        }

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                kvm_iommu_unmap_memslots(kvm);

out_unlock:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

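/*
 * Tear down the IOMMU mappings for @npages guest frames starting at
 * @base_gfn, unpinning each page as it is unmapped.  iommu_unmap() is asked
 * for PAGE_SIZE but may unmap a larger block; the returned size tells us
 * how many pages were actually released.
 */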
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        kvm_pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* check if iommu exists and in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                size_t size;

                /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

                if (!phys) {
                        gfn++;
                        continue;
                }

                pfn  = phys >> PAGE_SHIFT;

                /* Unmap address from IO address space */
                size        = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
                unmap_pages = 1ULL << get_order(size);

                /* Unpin all pages we just unmapped to not leak any memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;

                cond_resched();
        }
}

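/* Remove the IOMMU mappings covering a single memory slot. */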
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}

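/* Walk all memslots under SRCU and unmap each one from the IOMMU domain. */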
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int idx;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots)
                kvm_iommu_unmap_pages(kvm, memslot);

        srcu_read_unlock(&kvm->srcu, idx);

        if (kvm->arch.iommu_noncoherent)
                kvm_arch_unregister_noncoherent_dma(kvm);

        return 0;
}

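/*
 * Final teardown: unmap all memslots under slots_lock, clear the per-VM
 * IOMMU state, then free the domain itself.
 */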
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        mutex_lock(&kvm->slots_lock);
        kvm_iommu_unmap_memslots(kvm);
        kvm->arch.iommu_domain = NULL;
        kvm->arch.iommu_noncoherent = false;
        mutex_unlock(&kvm->slots_lock);

        iommu_domain_free(domain);
        return 0;
}