virt/kvm/iommu.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
                   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);

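/*
 * Pin @npages guest pages starting at @gfn and return the host pfn of the
 * first page.  Each gfn_to_pfn_memslot() call takes a reference on the
 * page; those references are dropped later, page by page, via
 * kvm_unpin_pages().  Only the first pfn is checked for errors.
 */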
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                           unsigned long npages)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn = gfn_to_pfn_memslot(slot, gfn);
        end_gfn = gfn + npages;
        gfn += 1;

        if (is_error_noslot_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(slot, gfn++);

        return pfn;
}

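/* Drop the page references taken by kvm_pin_pages(). */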
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}

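/*
 * Map a memslot's guest-physical range into the IOMMU domain, using the
 * largest page size that fits inside the slot and is aligned for both the
 * gfn and the hva.  Pages are pinned before they are mapped; on failure,
 * everything mapped so far is unmapped and unpinned again.
 */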
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        gfn = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ;
        if (!(slot->flags & KVM_MEM_READONLY))
                flags |= IOMMU_WRITE;
        if (!kvm->arch.iommu_noncoherent)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /* Make sure hva is aligned to the page size we want to map */
                while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
                if (is_error_noslot_pfn(pfn)) {
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              page_size, flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_pages: "
                               "iommu failed to map pfn=%llx\n", pfn);
                        kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
        return r;
}

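/*
 * Map all memslots into the IOMMU domain.  If the domain is noncoherent,
 * register the VM as a noncoherent-DMA user first so the architecture
 * code can take that into account.
 */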
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int idx, r = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        if (kvm->arch.iommu_noncoherent)
                kvm_arch_register_noncoherent_dma(kvm);

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots) {
                r = kvm_iommu_map_pages(kvm, memslot);
                if (r)
                        break;
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return r;
}

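/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If the device's
 * cache-coherency capability differs from what the domain was mapped with,
 * all memslots are remapped with the new coherency setting.
 */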
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct pci_dev *pdev = NULL;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r;
        bool noncoherent;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                dev_err(&pdev->dev, "kvm assign device failed ret %d\n", r);
                return r;
        }

        noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

        /* Check if need to update IOMMU page table for guest memory */
        if (noncoherent != kvm->arch.iommu_noncoherent) {
                kvm_iommu_unmap_memslots(kvm);
                kvm->arch.iommu_noncoherent = noncoherent;
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        pci_set_dev_assigned(pdev);

        dev_info(&pdev->dev, "kvm assign device\n");

        return 0;
out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

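/* Detach an assigned PCI device from the VM's IOMMU domain. */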
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        struct pci_dev *pdev = NULL;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        pci_clear_dev_assigned(pdev);

        dev_info(&pdev->dev, "kvm deassign device\n");

        return 0;
}

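/*
 * Allocate the VM's IOMMU domain and map all guest memory into it.  Device
 * assignment is refused when the platform lacks interrupt remapping, unless
 * the allow_unsafe_assigned_interrupts module parameter is set.
 */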
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_present(&pci_bus_type)) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        mutex_lock(&kvm->slots_lock);

        kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
        if (!kvm->arch.iommu_domain) {
                r = -ENOMEM;
                goto out_unlock;
        }

        if (!allow_unsafe_assigned_interrupts &&
            !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
                printk(KERN_WARNING "%s: No interrupt remapping support,"
                       " disallowing device assignment."
                       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
                       " module option.\n", __func__);
                iommu_domain_free(kvm->arch.iommu_domain);
                kvm->arch.iommu_domain = NULL;
                r = -EPERM;
                goto out_unlock;
        }

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                kvm_iommu_unmap_memslots(kvm);

out_unlock:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

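/*
 * Unmap @npages pages starting at @base_gfn from the IOMMU domain and drop
 * the pin references.  iommu_unmap() may tear down more than a single 4k
 * page at a time (if a larger page was mapped), so advance by however many
 * pages were actually unmapped.
 */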
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* check if iommu exists and in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                size_t size;

                /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

                if (!phys) {
                        gfn++;
                        continue;
                }

                pfn = phys >> PAGE_SHIFT;

                /* Unmap address from IO address space */
                size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
                unmap_pages = 1ULL << get_order(size);

                /* Unpin all pages we just unmapped to not leak any memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;
        }
}

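/* Unmap and unpin an entire memslot. */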
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}

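/*
 * Unmap every memslot and, if the domain was noncoherent, drop the
 * noncoherent-DMA registration taken in kvm_iommu_map_memslots().
 */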
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int idx;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots)
                kvm_iommu_unmap_pages(kvm, memslot);

        srcu_read_unlock(&kvm->srcu, idx);

        if (kvm->arch.iommu_noncoherent)
                kvm_arch_unregister_noncoherent_dma(kvm);

        return 0;
}

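/*
 * Tear down the VM's IOMMU domain: unmap and unpin all guest memory, then
 * free the domain itself.
 */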
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        mutex_lock(&kvm->slots_lock);
        kvm_iommu_unmap_memslots(kvm);
        kvm->arch.iommu_domain = NULL;
        kvm->arch.iommu_noncoherent = false;
        mutex_unlock(&kvm->slots_lock);

        iommu_domain_free(domain);
        return 0;
}