KVM: Update Red Hat copyrights
[deliverable/linux.git] / virt/kvm/iommu.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);

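/*
 * Pin the host pages backing 'size' bytes of guest memory starting at
 * gfn within this memslot and return the pfn of the first page (or an
 * error pfn). Every page gets its own reference, since the range is
 * later unpinned one 4k page at a time by kvm_unpin_pages().
 */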
static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
                           gfn_t gfn, unsigned long size)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
        end_gfn = gfn + (size >> PAGE_SHIFT);
        gfn    += 1;

        if (is_error_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(kvm, slot, gfn++);

        return pfn;
}

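/*
 * Map a memslot's guest physical range into the VM's IOMMU domain. Each
 * chunk is mapped with the largest host page size that stays inside the
 * memslot and is naturally aligned, and its backing pages are pinned
 * first. On an iommu_map() failure, everything mapped so far is torn
 * down again and the error is returned.
 */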
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        gfn     = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ | IOMMU_WRITE;
        if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
                if (is_error_pfn(pfn)) {
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              get_order(page_size), flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%lx\n", pfn);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
        return r;
}

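/* Map every memslot into the IOMMU domain; stop at the first failure. */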
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int i, r = 0;
        struct kvm_memslots *slots;

        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++) {
                r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
                if (r)
                        break;
        }

        return r;
}

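/*
 * Attach an assigned PCI device to the VM's IOMMU domain. If the domain
 * reports IOMMU_CAP_CACHE_COHERENCY and the corresponding flag was not
 * already set, guest memory is remapped with IOMMU_CACHE.
 */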
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct pci_dev *pdev = NULL;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r, last_flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                printk(KERN_ERR "assign device %x:%x:%x.%x failed",
                        pci_domain_nr(pdev->bus),
                        pdev->bus->number,
                        PCI_SLOT(pdev->devfn),
                        PCI_FUNC(pdev->devfn));
                return r;
        }

        last_flags = kvm->arch.iommu_flags;
        if (iommu_domain_has_cap(kvm->arch.iommu_domain,
                                 IOMMU_CAP_CACHE_COHERENCY))
                kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

        /* Check if need to update IOMMU page table for guest memory */
        if ((last_flags ^ kvm->arch.iommu_flags) ==
                        KVM_IOMMU_CACHE_COHERENCY) {
                kvm_iommu_unmap_memslots(kvm);
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

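/* Detach an assigned PCI device from the VM's IOMMU domain. */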
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        struct pci_dev *pdev = NULL;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}

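/*
 * Allocate an IOMMU domain for the VM and map all existing memslots into
 * it. Fails with -ENODEV when no hardware IOMMU is present.
 */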
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_found()) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        kvm->arch.iommu_domain = iommu_domain_alloc();
        if (!kvm->arch.iommu_domain)
                return -ENOMEM;

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                goto out_unmap;

        return 0;

out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}

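/*
 * Undo kvm_iommu_map_pages() for a guest frame range: walk the IOVA
 * space, unmap each mapping found and unpin the host pages that backed
 * it.
 */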
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* check if iommu exists and in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                int order;

                /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
                pfn  = phys >> PAGE_SHIFT;

                /* Unmap address from IO address space */
                order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
                unmap_pages = 1ULL << order;

                /* Unpin all pages we just unmapped to not leak any memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;
        }
}

static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int i;
        struct kvm_memslots *slots;

        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++) {
                kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
                                    slots->memslots[i].npages);
        }

        return 0;
}

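/* Tear down all IOMMU mappings for the VM and free its IOMMU domain. */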
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        kvm_iommu_unmap_memslots(kvm);
        iommu_domain_free(domain);
        return 0;
}