| 1 | /* |
| 2 | * Copyright (c) 2006, Intel Corporation. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify it |
| 5 | * under the terms and conditions of the GNU General Public License, |
| 6 | * version 2, as published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT |
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | * more details. |
| 12 | * |
| 13 | * You should have received a copy of the GNU General Public License along with |
| 14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
| 15 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
| 16 | * |
| 17 | * Copyright (C) 2006-2008 Intel Corporation |
| 18 | * Copyright IBM Corporation, 2008 |
| 19 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. |
| 20 | * |
| 21 | * Author: Allen M. Kay <allen.m.kay@intel.com> |
| 22 | * Author: Weidong Han <weidong.han@intel.com> |
| 23 | * Author: Ben-Ami Yassour <benami@il.ibm.com> |
| 24 | */ |
| 25 | |
| 26 | #include <linux/list.h> |
| 27 | #include <linux/kvm_host.h> |
| 28 | #include <linux/module.h> |
| 29 | #include <linux/pci.h> |
| 30 | #include <linux/stat.h> |
| 31 | #include <linux/dmar.h> |
| 32 | #include <linux/iommu.h> |
| 33 | #include <linux/intel-iommu.h> |
| 34 | |
| 35 | static bool allow_unsafe_assigned_interrupts; |
| 36 | module_param_named(allow_unsafe_assigned_interrupts, |
| 37 | allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR); |
| 38 | MODULE_PARM_DESC(allow_unsafe_assigned_interrupts, |
| 39 | "Enable device assignment on platforms without interrupt remapping support."); |
| 40 | |
| 41 | static int kvm_iommu_unmap_memslots(struct kvm *kvm); |
| 42 | static void kvm_iommu_put_pages(struct kvm *kvm, |
| 43 | gfn_t base_gfn, unsigned long npages); |
| 44 | |
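/*
 * Pin the host pages backing @size bytes of guest memory starting at
 * @gfn and return the pfn of the first page. A reference is taken on
 * every 4K page in the range so that kvm_iommu_put_pages() can later
 * release them in 4K steps. If the first lookup fails, the error pfn
 * is returned and nothing is pinned.
 */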
| 45 | static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, |
| 46 | unsigned long size) |
| 47 | { |
| 48 | gfn_t end_gfn; |
| 49 | pfn_t pfn; |
| 50 | |
| 51 | pfn = gfn_to_pfn_memslot(slot, gfn); |
| 52 | end_gfn = gfn + (size >> PAGE_SHIFT); |
| 53 | gfn += 1; |
| 54 | |
| 55 | if (is_error_noslot_pfn(pfn)) |
| 56 | return pfn; |
| 57 | |
| 58 | while (gfn < end_gfn) |
| 59 | gfn_to_pfn_memslot(slot, gfn++); |
| 60 | |
| 61 | return pfn; |
| 62 | } |
| 63 | |
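/*
 * Map a memslot into the VM's IOMMU domain, using the guest physical
 * address as the IO virtual address. Pages that are already mapped are
 * skipped. For the rest, the largest page size is chosen that the host
 * backs contiguously, that fits inside the memslot and that the gfn is
 * aligned to; the pages are then pinned and mapped with a single
 * iommu_map() call. On failure, everything mapped so far is torn down.
 */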
| 64 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) |
| 65 | { |
| 66 | gfn_t gfn, end_gfn; |
| 67 | pfn_t pfn; |
| 68 | int r = 0; |
| 69 | struct iommu_domain *domain = kvm->arch.iommu_domain; |
| 70 | int flags; |
| 71 | |
	/* Check if the IOMMU domain exists and is in use */
| 73 | if (!domain) |
| 74 | return 0; |
| 75 | |
| 76 | gfn = slot->base_gfn; |
| 77 | end_gfn = gfn + slot->npages; |
| 78 | |
| 79 | flags = IOMMU_READ | IOMMU_WRITE; |
| 80 | if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY) |
		flags |= IOMMU_CACHE;

| 84 | while (gfn < end_gfn) { |
| 85 | unsigned long page_size; |
| 86 | |
| 87 | /* Check if already mapped */ |
| 88 | if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { |
| 89 | gfn += 1; |
| 90 | continue; |
| 91 | } |
| 92 | |
		/* Get the host page size we could use for this mapping */
| 94 | page_size = kvm_host_page_size(kvm, gfn); |
| 95 | |
| 96 | /* Make sure the page_size does not exceed the memslot */ |
| 97 | while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) |
| 98 | page_size >>= 1; |
| 99 | |
| 100 | /* Make sure gfn is aligned to the page size we want to map */ |
| 101 | while ((gfn << PAGE_SHIFT) & (page_size - 1)) |
| 102 | page_size >>= 1; |
| 103 | |
| 104 | /* |
| 105 | * Pin all pages we are about to map in memory. This is |
| 106 | * important because we unmap and unpin in 4kb steps later. |
| 107 | */ |
| 108 | pfn = kvm_pin_pages(slot, gfn, page_size); |
| 109 | if (is_error_noslot_pfn(pfn)) { |
| 110 | gfn += 1; |
| 111 | continue; |
| 112 | } |
| 113 | |
| 114 | /* Map into IO address space */ |
| 115 | r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), |
| 116 | page_size, flags); |
| 117 | if (r) { |
			printk(KERN_ERR "kvm_iommu_map_pages: "
			       "iommu failed to map pfn=%llx\n", pfn);
| 120 | goto unmap_pages; |
| 121 | } |
| 122 | |
		gfn += page_size >> PAGE_SHIFT;
	}
| 127 | |
| 128 | return 0; |
| 129 | |
| 130 | unmap_pages: |
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
| 132 | return r; |
| 133 | } |
| 134 | |
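/*
 * Set up IOMMU mappings for all current memslots. Stops at the first
 * slot that fails to map; cleaning up is left to the caller.
 */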
| 135 | static int kvm_iommu_map_memslots(struct kvm *kvm) |
| 136 | { |
| 137 | int idx, r = 0; |
| 138 | struct kvm_memslots *slots; |
| 139 | struct kvm_memory_slot *memslot; |
| 140 | |
| 141 | idx = srcu_read_lock(&kvm->srcu); |
| 142 | slots = kvm_memslots(kvm); |
| 143 | |
| 144 | kvm_for_each_memslot(memslot, slots) { |
| 145 | r = kvm_iommu_map_pages(kvm, memslot); |
| 146 | if (r) |
| 147 | break; |
| 148 | } |
| 149 | srcu_read_unlock(&kvm->srcu, idx); |
| 150 | |
| 151 | return r; |
| 152 | } |
| 153 | |
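/*
 * Attach an assigned PCI device to the VM's IOMMU domain. If attaching
 * the device newly exposes IOMMU_CAP_CACHE_COHERENCY on the domain, all
 * memslots are remapped so that the mappings pick up the IOMMU_CACHE
 * flag. A no-op when the VM has no IOMMU domain.
 */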
| 154 | int kvm_assign_device(struct kvm *kvm, |
| 155 | struct kvm_assigned_dev_kernel *assigned_dev) |
| 156 | { |
| 157 | struct pci_dev *pdev = NULL; |
| 158 | struct iommu_domain *domain = kvm->arch.iommu_domain; |
| 159 | int r, last_flags; |
| 160 | |
	/* Check if the IOMMU domain exists and is in use */
| 162 | if (!domain) |
| 163 | return 0; |
| 164 | |
| 165 | pdev = assigned_dev->dev; |
| 166 | if (pdev == NULL) |
| 167 | return -ENODEV; |
| 168 | |
| 169 | r = iommu_attach_device(domain, &pdev->dev); |
| 170 | if (r) { |
		dev_err(&pdev->dev, "kvm assign device failed ret %d\n", r);
| 172 | return r; |
| 173 | } |
| 174 | |
| 175 | last_flags = kvm->arch.iommu_flags; |
| 176 | if (iommu_domain_has_cap(kvm->arch.iommu_domain, |
| 177 | IOMMU_CAP_CACHE_COHERENCY)) |
| 178 | kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY; |
| 179 | |
	/* Check if we need to update the IOMMU page tables for guest memory */
| 181 | if ((last_flags ^ kvm->arch.iommu_flags) == |
| 182 | KVM_IOMMU_CACHE_COHERENCY) { |
| 183 | kvm_iommu_unmap_memslots(kvm); |
| 184 | r = kvm_iommu_map_memslots(kvm); |
| 185 | if (r) |
| 186 | goto out_unmap; |
| 187 | } |
| 188 | |
| 189 | pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; |
| 190 | |
	printk(KERN_DEBUG "assign device %04x:%02x:%02x.%x\n",
| 192 | assigned_dev->host_segnr, |
| 193 | assigned_dev->host_busnr, |
| 194 | PCI_SLOT(assigned_dev->host_devfn), |
| 195 | PCI_FUNC(assigned_dev->host_devfn)); |
| 196 | |
| 197 | return 0; |
| 198 | out_unmap: |
| 199 | kvm_iommu_unmap_memslots(kvm); |
| 200 | return r; |
| 201 | } |
| 202 | |
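/*
 * Detach an assigned PCI device from the VM's IOMMU domain and clear
 * its "assigned" flag. The IOMMU mappings are left in place; they are
 * shared by all assigned devices and torn down in
 * kvm_iommu_unmap_guest().
 */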
| 203 | int kvm_deassign_device(struct kvm *kvm, |
| 204 | struct kvm_assigned_dev_kernel *assigned_dev) |
| 205 | { |
| 206 | struct iommu_domain *domain = kvm->arch.iommu_domain; |
| 207 | struct pci_dev *pdev = NULL; |
| 208 | |
	/* Check if the IOMMU domain exists and is in use */
| 210 | if (!domain) |
| 211 | return 0; |
| 212 | |
| 213 | pdev = assigned_dev->dev; |
| 214 | if (pdev == NULL) |
| 215 | return -ENODEV; |
| 216 | |
| 217 | iommu_detach_device(domain, &pdev->dev); |
| 218 | |
| 219 | pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; |
| 220 | |
	printk(KERN_DEBUG "deassign device %04x:%02x:%02x.%x\n",
| 222 | assigned_dev->host_segnr, |
| 223 | assigned_dev->host_busnr, |
| 224 | PCI_SLOT(assigned_dev->host_devfn), |
| 225 | PCI_FUNC(assigned_dev->host_devfn)); |
| 226 | |
| 227 | return 0; |
| 228 | } |
| 229 | |
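/*
 * Allocate the per-VM IOMMU domain and populate it with mappings for
 * all current memslots. Fails with -EPERM if the platform lacks
 * interrupt remapping, unless the allow_unsafe_assigned_interrupts
 * module parameter is set.
 */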
| 230 | int kvm_iommu_map_guest(struct kvm *kvm) |
| 231 | { |
| 232 | int r; |
| 233 | |
| 234 | if (!iommu_present(&pci_bus_type)) { |
| 235 | printk(KERN_ERR "%s: iommu not found\n", __func__); |
| 236 | return -ENODEV; |
| 237 | } |
| 238 | |
| 239 | mutex_lock(&kvm->slots_lock); |
| 240 | |
| 241 | kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); |
| 242 | if (!kvm->arch.iommu_domain) { |
| 243 | r = -ENOMEM; |
| 244 | goto out_unlock; |
| 245 | } |
| 246 | |
| 247 | if (!allow_unsafe_assigned_interrupts && |
| 248 | !iommu_domain_has_cap(kvm->arch.iommu_domain, |
| 249 | IOMMU_CAP_INTR_REMAP)) { |
		printk(KERN_WARNING "%s: No interrupt remapping support,"
		       " disallowing device assignment."
		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
		       " module option.\n", __func__);
| 254 | iommu_domain_free(kvm->arch.iommu_domain); |
| 255 | kvm->arch.iommu_domain = NULL; |
| 256 | r = -EPERM; |
| 257 | goto out_unlock; |
| 258 | } |
| 259 | |
| 260 | r = kvm_iommu_map_memslots(kvm); |
| 261 | if (r) |
| 262 | kvm_iommu_unmap_memslots(kvm); |
| 263 | |
| 264 | out_unlock: |
| 265 | mutex_unlock(&kvm->slots_lock); |
| 266 | return r; |
| 267 | } |
| 268 | |
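/* Release the references taken by kvm_pin_pages(), one page at a time. */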
| 269 | static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) |
| 270 | { |
| 271 | unsigned long i; |
| 272 | |
| 273 | for (i = 0; i < npages; ++i) |
| 274 | kvm_release_pfn_clean(pfn + i); |
| 275 | } |
| 276 | |
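/*
 * Unmap @npages pages starting at @base_gfn from the IOMMU domain and
 * unpin the backing host pages. iommu_unmap() may tear down a larger
 * mapping than the single page asked for; the size it returns tells us
 * how many pages were actually unmapped and need unpinning.
 */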
| 277 | static void kvm_iommu_put_pages(struct kvm *kvm, |
| 278 | gfn_t base_gfn, unsigned long npages) |
| 279 | { |
| 280 | struct iommu_domain *domain; |
| 281 | gfn_t end_gfn, gfn; |
| 282 | pfn_t pfn; |
| 283 | u64 phys; |
| 284 | |
| 285 | domain = kvm->arch.iommu_domain; |
| 286 | end_gfn = base_gfn + npages; |
| 287 | gfn = base_gfn; |
| 288 | |
	/* Check if the IOMMU domain exists and is in use */
| 290 | if (!domain) |
| 291 | return; |
| 292 | |
| 293 | while (gfn < end_gfn) { |
| 294 | unsigned long unmap_pages; |
| 295 | size_t size; |
| 296 | |
| 297 | /* Get physical address */ |
| 298 | phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); |
| 299 | |
| 300 | if (!phys) { |
| 301 | gfn++; |
| 302 | continue; |
| 303 | } |
| 304 | |
| 305 | pfn = phys >> PAGE_SHIFT; |
| 306 | |
| 307 | /* Unmap address from IO address space */ |
| 308 | size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); |
| 309 | unmap_pages = 1ULL << get_order(size); |
| 310 | |
		/* Unpin all pages we just unmapped so we don't leak memory */
| 312 | kvm_unpin_pages(kvm, pfn, unmap_pages); |
| 313 | |
| 314 | gfn += unmap_pages; |
| 315 | } |
| 316 | } |
| 317 | |
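/* Remove the IOMMU mappings and page pins covering a whole memslot. */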
| 318 | void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) |
| 319 | { |
| 320 | kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); |
| 321 | } |
| 322 | |
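/* Tear down the IOMMU mappings of every current memslot. */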
| 323 | static int kvm_iommu_unmap_memslots(struct kvm *kvm) |
| 324 | { |
| 325 | int idx; |
| 326 | struct kvm_memslots *slots; |
| 327 | struct kvm_memory_slot *memslot; |
| 328 | |
| 329 | idx = srcu_read_lock(&kvm->srcu); |
| 330 | slots = kvm_memslots(kvm); |
| 331 | |
| 332 | kvm_for_each_memslot(memslot, slots) |
| 333 | kvm_iommu_unmap_pages(kvm, memslot); |
| 334 | |
| 335 | srcu_read_unlock(&kvm->srcu, idx); |
| 336 | |
| 337 | return 0; |
| 338 | } |
| 339 | |
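/*
 * Tear down all IOMMU state of the VM: unmap every memslot, clear the
 * domain pointer under slots_lock and free the domain.
 */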
| 340 | int kvm_iommu_unmap_guest(struct kvm *kvm) |
| 341 | { |
| 342 | struct iommu_domain *domain = kvm->arch.iommu_domain; |
| 343 | |
	/* Check if the IOMMU domain exists and is in use */
| 345 | if (!domain) |
| 346 | return 0; |
| 347 | |
| 348 | mutex_lock(&kvm->slots_lock); |
| 349 | kvm_iommu_unmap_memslots(kvm); |
| 350 | kvm->arch.iommu_domain = NULL; |
| 351 | mutex_unlock(&kvm->slots_lock); |
| 352 | |
| 353 | iommu_domain_free(domain); |
| 354 | return 0; |
| 355 | } |