| 1 | /* |
| 2 | * Copyright (c) 2006, Intel Corporation. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify it |
| 5 | * under the terms and conditions of the GNU General Public License, |
| 6 | * version 2, as published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT |
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | * more details. |
| 12 | * |
| 13 | * You should have received a copy of the GNU General Public License along with |
| 14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
| 15 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
| 16 | * |
| 17 | * Copyright (C) 2006-2008 Intel Corporation |
| 18 | * Author: Ashok Raj <ashok.raj@intel.com> |
| 19 | * Author: Shaohua Li <shaohua.li@intel.com> |
| 20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
| 21 | * |
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS by the BIOS via the DMA Remapping Reporting (DMAR)
 * ACPI table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
| 27 | */ |
| 28 | |
| 29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */ |
| 30 | |
| 31 | #include <linux/pci.h> |
| 32 | #include <linux/dmar.h> |
| 33 | #include <linux/iova.h> |
| 34 | #include <linux/intel-iommu.h> |
| 35 | #include <linux/timer.h> |
| 36 | #include <linux/irq.h> |
| 37 | #include <linux/interrupt.h> |
| 38 | #include <linux/tboot.h> |
| 39 | #include <linux/dmi.h> |
| 40 | #include <linux/slab.h> |
| 41 | #include <asm/irq_remapping.h> |
| 42 | #include <asm/iommu_table.h> |
| 43 | |
| 44 | #include "irq_remapping.h" |
| 45 | |
| 46 | /* |
| 47 | * Assumptions: |
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
 *    before the I/O devices managed by that unit.
 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
 *    after the I/O devices managed by that unit.
| 52 | * 3) Hotplug events are rare. |
| 53 | * |
| 54 | * Locking rules for DMA and interrupt remapping related global data structures: |
| 55 | * 1) Use dmar_global_lock in process context |
| 56 | * 2) Use RCU in interrupt context |
| 57 | */ |
| 58 | DECLARE_RWSEM(dmar_global_lock); |
| 59 | LIST_HEAD(dmar_drhd_units); |
| 60 | |
| 61 | struct acpi_table_header * __initdata dmar_tbl; |
| 62 | static acpi_size dmar_tbl_size; |
| 63 | static int dmar_dev_scope_status = 1; |
| 64 | |
| 65 | static int alloc_iommu(struct dmar_drhd_unit *drhd); |
| 66 | static void free_iommu(struct intel_iommu *iommu); |
| 67 | |
| 68 | static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) |
| 69 | { |
| 70 | /* |
	 * Add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
| 73 | */ |
| 74 | if (drhd->include_all) |
| 75 | list_add_tail_rcu(&drhd->list, &dmar_drhd_units); |
| 76 | else |
| 77 | list_add_rcu(&drhd->list, &dmar_drhd_units); |
| 78 | } |
| 79 | |
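/*
 * Count the trackable device scope entries (ACPI, endpoint and bridge
 * types) in [start, end) and allocate a matching dmar_dev_scope array.
 * Returns NULL with *cnt == 0 if there is nothing to track, or NULL with
 * *cnt != 0 if the allocation itself failed.
 */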
| 80 | void *dmar_alloc_dev_scope(void *start, void *end, int *cnt) |
| 81 | { |
| 82 | struct acpi_dmar_device_scope *scope; |
| 83 | |
| 84 | *cnt = 0; |
| 85 | while (start < end) { |
| 86 | scope = start; |
| 87 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI || |
| 88 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || |
| 89 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) |
| 90 | (*cnt)++; |
| 91 | else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC && |
| 92 | scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) { |
| 93 | pr_warn("Unsupported device scope\n"); |
| 94 | } |
| 95 | start += scope->length; |
| 96 | } |
| 97 | if (*cnt == 0) |
| 98 | return NULL; |
| 99 | |
| 100 | return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL); |
| 101 | } |
| 102 | |
| 103 | void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt) |
| 104 | { |
| 105 | int i; |
| 106 | struct device *tmp_dev; |
| 107 | |
| 108 | if (*devices && *cnt) { |
| 109 | for_each_active_dev_scope(*devices, *cnt, i, tmp_dev) |
| 110 | put_device(tmp_dev); |
| 111 | kfree(*devices); |
| 112 | } |
| 113 | |
| 114 | *devices = NULL; |
| 115 | *cnt = 0; |
| 116 | } |
| 117 | |
| 118 | /* Optimize out kzalloc()/kfree() for normal cases */ |
| 119 | static char dmar_pci_notify_info_buf[64]; |
| 120 | |
| 121 | static struct dmar_pci_notify_info * |
| 122 | dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) |
| 123 | { |
| 124 | int level = 0; |
| 125 | size_t size; |
| 126 | struct pci_dev *tmp; |
| 127 | struct dmar_pci_notify_info *info; |
| 128 | |
| 129 | BUG_ON(dev->is_virtfn); |
| 130 | |
| 131 | /* Only generate path[] for device addition event */ |
| 132 | if (event == BUS_NOTIFY_ADD_DEVICE) |
| 133 | for (tmp = dev; tmp; tmp = tmp->bus->self) |
| 134 | level++; |
| 135 | |
| 136 | size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path); |
| 137 | if (size <= sizeof(dmar_pci_notify_info_buf)) { |
| 138 | info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf; |
| 139 | } else { |
| 140 | info = kzalloc(size, GFP_KERNEL); |
| 141 | if (!info) { |
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
| 144 | if (dmar_dev_scope_status == 0) |
| 145 | dmar_dev_scope_status = -ENOMEM; |
| 146 | return NULL; |
| 147 | } |
| 148 | } |
| 149 | |
| 150 | info->event = event; |
| 151 | info->dev = dev; |
| 152 | info->seg = pci_domain_nr(dev->bus); |
| 153 | info->level = level; |
| 154 | if (event == BUS_NOTIFY_ADD_DEVICE) { |
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
| 161 | } |
| 162 | |
| 163 | return info; |
| 164 | } |
| 165 | |
| 166 | static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info) |
| 167 | { |
| 168 | if ((void *)info != dmar_pci_notify_info_buf) |
| 169 | kfree(info); |
| 170 | } |
| 171 | |
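/*
 * Return true when the path recorded in @info (root bus down to the
 * device) matches the @count-entry scope path rooted at @bus.
 */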
| 172 | static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus, |
| 173 | struct acpi_dmar_pci_path *path, int count) |
| 174 | { |
| 175 | int i; |
| 176 | |
| 177 | if (info->bus != bus) |
| 178 | return false; |
| 179 | if (info->level != count) |
| 180 | return false; |
| 181 | |
| 182 | for (i = 0; i < count; i++) { |
| 183 | if (path[i].device != info->path[i].device || |
| 184 | path[i].function != info->path[i].function) |
| 185 | return false; |
| 186 | } |
| 187 | |
| 188 | return true; |
| 189 | } |
| 190 | |
| 191 | /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */ |
| 192 | int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, |
			  void *start, void *end, u16 segment,
| 194 | struct dmar_dev_scope *devices, |
| 195 | int devices_cnt) |
| 196 | { |
| 197 | int i, level; |
| 198 | struct device *tmp, *dev = &info->dev->dev; |
| 199 | struct acpi_dmar_device_scope *scope; |
| 200 | struct acpi_dmar_pci_path *path; |
| 201 | |
| 202 | if (segment != info->seg) |
| 203 | return 0; |
| 204 | |
| 205 | for (; start < end; start += scope->length) { |
| 206 | scope = start; |
| 207 | if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT && |
| 208 | scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE) |
| 209 | continue; |
| 210 | |
| 211 | path = (struct acpi_dmar_pci_path *)(scope + 1); |
| 212 | level = (scope->length - sizeof(*scope)) / sizeof(*path); |
| 213 | if (!dmar_match_pci_path(info, scope->bus, path, level)) |
| 214 | continue; |
| 215 | |
| 216 | if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^ |
| 217 | (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) { |
| 218 | pr_warn("Device scope type does not match for %s\n", |
| 219 | pci_name(info->dev)); |
| 220 | return -EINVAL; |
| 221 | } |
| 222 | |
| 223 | for_each_dev_scope(devices, devices_cnt, i, tmp) |
| 224 | if (tmp == NULL) { |
| 225 | devices[i].bus = info->dev->bus->number; |
| 226 | devices[i].devfn = info->dev->devfn; |
| 227 | rcu_assign_pointer(devices[i].dev, |
| 228 | get_device(dev)); |
| 229 | return 1; |
| 230 | } |
| 231 | BUG_ON(i >= devices_cnt); |
| 232 | } |
| 233 | |
| 234 | return 0; |
| 235 | } |
| 236 | |
| 237 | int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, |
| 238 | struct dmar_dev_scope *devices, int count) |
| 239 | { |
| 240 | int index; |
| 241 | struct device *tmp; |
| 242 | |
| 243 | if (info->seg != segment) |
| 244 | return 0; |
| 245 | |
| 246 | for_each_active_dev_scope(devices, count, index, tmp) |
| 247 | if (tmp == &info->dev->dev) { |
| 248 | rcu_assign_pointer(devices[index].dev, NULL); |
| 249 | synchronize_rcu(); |
| 250 | put_device(tmp); |
| 251 | return 1; |
| 252 | } |
| 253 | |
| 254 | return 0; |
| 255 | } |
| 256 | |
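/*
 * Hook a newly added PCI device into the scope lists of all DRHD units
 * that enumerate it explicitly; INCLUDE_ALL units need no per-device
 * bookkeeping and are skipped.
 */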
| 257 | static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info) |
| 258 | { |
| 259 | int ret = 0; |
| 260 | struct dmar_drhd_unit *dmaru; |
| 261 | struct acpi_dmar_hardware_unit *drhd; |
| 262 | |
| 263 | for_each_drhd_unit(dmaru) { |
| 264 | if (dmaru->include_all) |
| 265 | continue; |
| 266 | |
| 267 | drhd = container_of(dmaru->hdr, |
| 268 | struct acpi_dmar_hardware_unit, header); |
| 269 | ret = dmar_insert_dev_scope(info, (void *)(drhd + 1), |
| 270 | ((void *)drhd) + drhd->header.length, |
| 271 | dmaru->segment, |
| 272 | dmaru->devices, dmaru->devices_cnt); |
| 273 | if (ret != 0) |
| 274 | break; |
| 275 | } |
| 276 | if (ret >= 0) |
| 277 | ret = dmar_iommu_notify_scope_dev(info); |
| 278 | if (ret < 0 && dmar_dev_scope_status == 0) |
| 279 | dmar_dev_scope_status = ret; |
| 280 | |
| 281 | return ret; |
| 282 | } |
| 283 | |
| 284 | static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info) |
| 285 | { |
| 286 | struct dmar_drhd_unit *dmaru; |
| 287 | |
| 288 | for_each_drhd_unit(dmaru) |
| 289 | if (dmar_remove_dev_scope(info, dmaru->segment, |
| 290 | dmaru->devices, dmaru->devices_cnt)) |
| 291 | break; |
| 292 | dmar_iommu_notify_scope_dev(info); |
| 293 | } |
| 294 | |
| 295 | static int dmar_pci_bus_notifier(struct notifier_block *nb, |
| 296 | unsigned long action, void *data) |
| 297 | { |
| 298 | struct pci_dev *pdev = to_pci_dev(data); |
| 299 | struct dmar_pci_notify_info *info; |
| 300 | |
| 301 | /* Only care about add/remove events for physical functions */ |
| 302 | if (pdev->is_virtfn) |
| 303 | return NOTIFY_DONE; |
| 304 | if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE) |
| 305 | return NOTIFY_DONE; |
| 306 | |
| 307 | info = dmar_alloc_pci_notify_info(pdev, action); |
| 308 | if (!info) |
| 309 | return NOTIFY_DONE; |
| 310 | |
| 311 | down_write(&dmar_global_lock); |
| 312 | if (action == BUS_NOTIFY_ADD_DEVICE) |
| 313 | dmar_pci_bus_add_dev(info); |
| 314 | else if (action == BUS_NOTIFY_DEL_DEVICE) |
| 315 | dmar_pci_bus_del_dev(info); |
| 316 | up_write(&dmar_global_lock); |
| 317 | |
| 318 | dmar_free_pci_notify_info(info); |
| 319 | |
| 320 | return NOTIFY_OK; |
| 321 | } |
| 322 | |
| 323 | static struct notifier_block dmar_pci_bus_nb = { |
| 324 | .notifier_call = dmar_pci_bus_notifier, |
| 325 | .priority = INT_MIN, |
| 326 | }; |
| 327 | |
| 328 | /** |
| 329 | * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition |
 * structure, which uniquely represents one DMA remapping hardware unit
| 331 | * present in the platform |
| 332 | */ |
| 333 | static int __init |
| 334 | dmar_parse_one_drhd(struct acpi_dmar_header *header) |
| 335 | { |
| 336 | struct acpi_dmar_hardware_unit *drhd; |
| 337 | struct dmar_drhd_unit *dmaru; |
| 338 | int ret = 0; |
| 339 | |
| 340 | drhd = (struct acpi_dmar_hardware_unit *)header; |
| 341 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); |
| 342 | if (!dmaru) |
| 343 | return -ENOMEM; |
| 344 | |
| 345 | dmaru->hdr = header; |
| 346 | dmaru->reg_base_addr = drhd->address; |
| 347 | dmaru->segment = drhd->segment; |
| 348 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ |
| 349 | dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1), |
| 350 | ((void *)drhd) + drhd->header.length, |
| 351 | &dmaru->devices_cnt); |
| 352 | if (dmaru->devices_cnt && dmaru->devices == NULL) { |
| 353 | kfree(dmaru); |
| 354 | return -ENOMEM; |
| 355 | } |
| 356 | |
| 357 | ret = alloc_iommu(dmaru); |
| 358 | if (ret) { |
| 359 | dmar_free_dev_scope(&dmaru->devices, |
| 360 | &dmaru->devices_cnt); |
| 361 | kfree(dmaru); |
| 362 | return ret; |
| 363 | } |
| 364 | dmar_register_drhd_unit(dmaru); |
| 365 | return 0; |
| 366 | } |
| 367 | |
| 368 | static void dmar_free_drhd(struct dmar_drhd_unit *dmaru) |
| 369 | { |
| 370 | if (dmaru->devices && dmaru->devices_cnt) |
| 371 | dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt); |
| 372 | if (dmaru->iommu) |
| 373 | free_iommu(dmaru->iommu); |
| 374 | kfree(dmaru); |
| 375 | } |
| 376 | |
| 377 | static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) |
| 378 | { |
| 379 | struct acpi_dmar_andd *andd = (void *)header; |
| 380 | |
| 381 | /* Check for NUL termination within the designated length */ |
| 382 | if (strnlen(andd->object_name, header->length - 8) == header->length - 8) { |
| 383 | WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, |
| 384 | "Your BIOS is broken; ANDD object name is not NUL-terminated\n" |
| 385 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", |
| 386 | dmi_get_system_info(DMI_BIOS_VENDOR), |
| 387 | dmi_get_system_info(DMI_BIOS_VERSION), |
| 388 | dmi_get_system_info(DMI_PRODUCT_VERSION)); |
| 389 | return -EINVAL; |
| 390 | } |
| 391 | pr_info("ANDD device: %x name: %s\n", andd->device_number, |
| 392 | andd->object_name); |
| 393 | |
| 394 | return 0; |
| 395 | } |
| 396 | |
| 397 | #ifdef CONFIG_ACPI_NUMA |
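/*
 * An RHSA structure ties a remapping hardware unit to a NUMA proximity
 * domain; record the node so later per-IOMMU allocations can be
 * node-local.
 */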
| 398 | static int __init |
| 399 | dmar_parse_one_rhsa(struct acpi_dmar_header *header) |
| 400 | { |
| 401 | struct acpi_dmar_rhsa *rhsa; |
| 402 | struct dmar_drhd_unit *drhd; |
| 403 | |
| 404 | rhsa = (struct acpi_dmar_rhsa *)header; |
| 405 | for_each_drhd_unit(drhd) { |
| 406 | if (drhd->reg_base_addr == rhsa->base_address) { |
| 407 | int node = acpi_map_pxm_to_node(rhsa->proximity_domain); |
| 408 | |
| 409 | if (!node_online(node)) |
| 410 | node = -1; |
| 411 | drhd->iommu->node = node; |
| 412 | return 0; |
| 413 | } |
| 414 | } |
| 415 | WARN_TAINT( |
| 416 | 1, TAINT_FIRMWARE_WORKAROUND, |
| 417 | "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" |
| 418 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", |
		rhsa->base_address,
| 420 | dmi_get_system_info(DMI_BIOS_VENDOR), |
| 421 | dmi_get_system_info(DMI_BIOS_VERSION), |
| 422 | dmi_get_system_info(DMI_PRODUCT_VERSION)); |
| 423 | |
| 424 | return 0; |
| 425 | } |
| 426 | #endif |
| 427 | |
| 428 | static void __init |
| 429 | dmar_table_print_dmar_entry(struct acpi_dmar_header *header) |
| 430 | { |
| 431 | struct acpi_dmar_hardware_unit *drhd; |
| 432 | struct acpi_dmar_reserved_memory *rmrr; |
| 433 | struct acpi_dmar_atsr *atsr; |
| 434 | struct acpi_dmar_rhsa *rhsa; |
| 435 | |
| 436 | switch (header->type) { |
| 437 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: |
| 438 | drhd = container_of(header, struct acpi_dmar_hardware_unit, |
| 439 | header); |
| 440 | pr_info("DRHD base: %#016Lx flags: %#x\n", |
| 441 | (unsigned long long)drhd->address, drhd->flags); |
| 442 | break; |
| 443 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
| 444 | rmrr = container_of(header, struct acpi_dmar_reserved_memory, |
| 445 | header); |
| 446 | pr_info("RMRR base: %#016Lx end: %#016Lx\n", |
| 447 | (unsigned long long)rmrr->base_address, |
| 448 | (unsigned long long)rmrr->end_address); |
| 449 | break; |
| 450 | case ACPI_DMAR_TYPE_ATSR: |
| 451 | atsr = container_of(header, struct acpi_dmar_atsr, header); |
| 452 | pr_info("ATSR flags: %#x\n", atsr->flags); |
| 453 | break; |
| 454 | case ACPI_DMAR_HARDWARE_AFFINITY: |
| 455 | rhsa = container_of(header, struct acpi_dmar_rhsa, header); |
| 456 | pr_info("RHSA base: %#016Lx proximity domain: %#x\n", |
| 457 | (unsigned long long)rhsa->base_address, |
| 458 | rhsa->proximity_domain); |
| 459 | break; |
| 460 | case ACPI_DMAR_TYPE_ANDD: |
		/*
		 * We don't print this here because we need to sanity-check
		 * it first. So print it in dmar_parse_one_andd() instead.
		 */
| 463 | break; |
| 464 | } |
| 465 | } |
| 466 | |
| 467 | /** |
| 468 | * dmar_table_detect - checks to see if the platform supports DMAR devices |
| 469 | */ |
| 470 | static int __init dmar_table_detect(void) |
| 471 | { |
| 472 | acpi_status status = AE_OK; |
| 473 | |
	/* if we can find the DMAR table, then there are DMAR devices */
| 475 | status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0, |
| 476 | (struct acpi_table_header **)&dmar_tbl, |
| 477 | &dmar_tbl_size); |
| 478 | |
| 479 | if (ACPI_SUCCESS(status) && !dmar_tbl) { |
| 480 | pr_warn("Unable to map DMAR\n"); |
| 481 | status = AE_NOT_FOUND; |
| 482 | } |
| 483 | |
| 484 | return (ACPI_SUCCESS(status) ? 1 : 0); |
| 485 | } |
| 486 | |
| 487 | /** |
 * parse_dmar_table - parses the DMA remapping reporting (DMAR) table
| 489 | */ |
| 490 | static int __init |
| 491 | parse_dmar_table(void) |
| 492 | { |
| 493 | struct acpi_table_dmar *dmar; |
| 494 | struct acpi_dmar_header *entry_header; |
| 495 | int ret = 0; |
| 496 | int drhd_count = 0; |
| 497 | |
| 498 | /* |
	 * Do it again: the mapping made at detection time may have used
	 * the early fixed map and has since been torn down.
| 501 | */ |
| 502 | dmar_table_detect(); |
| 503 | |
| 504 | /* |
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
| 507 | */ |
| 508 | dmar_tbl = tboot_get_dmar_table(dmar_tbl); |
| 509 | |
| 510 | dmar = (struct acpi_table_dmar *)dmar_tbl; |
| 511 | if (!dmar) |
| 512 | return -ENODEV; |
| 513 | |
| 514 | if (dmar->width < PAGE_SHIFT - 1) { |
| 515 | pr_warn("Invalid DMAR haw\n"); |
| 516 | return -EINVAL; |
| 517 | } |
| 518 | |
| 519 | pr_info("Host address width %d\n", dmar->width + 1); |
| 520 | |
| 521 | entry_header = (struct acpi_dmar_header *)(dmar + 1); |
| 522 | while (((unsigned long)entry_header) < |
| 523 | (((unsigned long)dmar) + dmar_tbl->length)) { |
| 524 | /* Avoid looping forever on bad ACPI tables */ |
| 525 | if (entry_header->length == 0) { |
| 526 | pr_warn("Invalid 0-length structure\n"); |
| 527 | ret = -EINVAL; |
| 528 | break; |
| 529 | } |
| 530 | |
| 531 | dmar_table_print_dmar_entry(entry_header); |
| 532 | |
| 533 | switch (entry_header->type) { |
| 534 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: |
| 535 | drhd_count++; |
| 536 | ret = dmar_parse_one_drhd(entry_header); |
| 537 | break; |
| 538 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
| 539 | ret = dmar_parse_one_rmrr(entry_header); |
| 540 | break; |
| 541 | case ACPI_DMAR_TYPE_ATSR: |
| 542 | ret = dmar_parse_one_atsr(entry_header); |
| 543 | break; |
| 544 | case ACPI_DMAR_HARDWARE_AFFINITY: |
| 545 | #ifdef CONFIG_ACPI_NUMA |
| 546 | ret = dmar_parse_one_rhsa(entry_header); |
| 547 | #endif |
| 548 | break; |
| 549 | case ACPI_DMAR_TYPE_ANDD: |
| 550 | ret = dmar_parse_one_andd(entry_header); |
| 551 | break; |
| 552 | default: |
| 553 | pr_warn("Unknown DMAR structure type %d\n", |
| 554 | entry_header->type); |
| 555 | ret = 0; /* for forward compatibility */ |
| 556 | break; |
| 557 | } |
| 558 | if (ret) |
| 559 | break; |
| 560 | |
| 561 | entry_header = ((void *)entry_header + entry_header->length); |
| 562 | } |
| 563 | if (drhd_count == 0) |
| 564 | pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); |
| 565 | return ret; |
| 566 | } |
| 567 | |
| 568 | static int dmar_pci_device_match(struct dmar_dev_scope devices[], |
| 569 | int cnt, struct pci_dev *dev) |
| 570 | { |
| 571 | int index; |
| 572 | struct device *tmp; |
| 573 | |
| 574 | while (dev) { |
| 575 | for_each_active_dev_scope(devices, cnt, index, tmp) |
| 576 | if (dev_is_pci(tmp) && dev == to_pci_dev(tmp)) |
| 577 | return 1; |
| 578 | |
| 579 | /* Check our parent */ |
| 580 | dev = dev->bus->self; |
| 581 | } |
| 582 | |
| 583 | return 0; |
| 584 | } |
| 585 | |
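/*
 * Find the DRHD unit covering @dev, either through an explicit device
 * scope entry (for the device or one of its upstream bridges) or via an
 * INCLUDE_ALL unit on the same segment.  Virtual functions are looked
 * up through their physical function.
 */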
| 586 | struct dmar_drhd_unit * |
| 587 | dmar_find_matched_drhd_unit(struct pci_dev *dev) |
| 588 | { |
| 589 | struct dmar_drhd_unit *dmaru; |
| 590 | struct acpi_dmar_hardware_unit *drhd; |
| 591 | |
| 592 | dev = pci_physfn(dev); |
| 593 | |
| 594 | rcu_read_lock(); |
| 595 | for_each_drhd_unit(dmaru) { |
| 596 | drhd = container_of(dmaru->hdr, |
| 597 | struct acpi_dmar_hardware_unit, |
| 598 | header); |
| 599 | |
| 600 | if (dmaru->include_all && |
| 601 | drhd->segment == pci_domain_nr(dev->bus)) |
| 602 | goto out; |
| 603 | |
| 604 | if (dmar_pci_device_match(dmaru->devices, |
| 605 | dmaru->devices_cnt, dev)) |
| 606 | goto out; |
| 607 | } |
| 608 | dmaru = NULL; |
| 609 | out: |
| 610 | rcu_read_unlock(); |
| 611 | |
| 612 | return dmaru; |
| 613 | } |
| 614 | |
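/*
 * Attach an ACPI namespace device (declared by an ANDD structure) to
 * the first DRHD unit whose scope list has an ACPI entry with the same
 * enumeration ID.
 */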
| 615 | static void __init dmar_acpi_insert_dev_scope(u8 device_number, |
| 616 | struct acpi_device *adev) |
| 617 | { |
| 618 | struct dmar_drhd_unit *dmaru; |
| 619 | struct acpi_dmar_hardware_unit *drhd; |
| 620 | struct acpi_dmar_device_scope *scope; |
| 621 | struct device *tmp; |
| 622 | int i; |
| 623 | struct acpi_dmar_pci_path *path; |
| 624 | |
| 625 | for_each_drhd_unit(dmaru) { |
| 626 | drhd = container_of(dmaru->hdr, |
| 627 | struct acpi_dmar_hardware_unit, |
| 628 | header); |
| 629 | |
| 630 | for (scope = (void *)(drhd + 1); |
| 631 | (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length; |
| 632 | scope = ((void *)scope) + scope->length) { |
| 633 | if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI) |
| 634 | continue; |
| 635 | if (scope->enumeration_id != device_number) |
| 636 | continue; |
| 637 | |
| 638 | path = (void *)(scope + 1); |
| 639 | pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n", |
| 640 | dev_name(&adev->dev), dmaru->reg_base_addr, |
| 641 | scope->bus, path->device, path->function); |
| 642 | for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp) |
| 643 | if (tmp == NULL) { |
| 644 | dmaru->devices[i].bus = scope->bus; |
| 645 | dmaru->devices[i].devfn = PCI_DEVFN(path->device, |
| 646 | path->function); |
| 647 | rcu_assign_pointer(dmaru->devices[i].dev, |
| 648 | get_device(&adev->dev)); |
| 649 | return; |
| 650 | } |
| 651 | BUG_ON(i >= dmaru->devices_cnt); |
| 652 | } |
| 653 | } |
| 654 | pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n", |
| 655 | device_number, dev_name(&adev->dev)); |
| 656 | } |
| 657 | |
| 658 | static int __init dmar_acpi_dev_scope_init(void) |
| 659 | { |
	struct acpi_dmar_andd *andd;

	/* Use a for loop so that "continue" still advances to the next entry */
	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		acpi_handle h;
		struct acpi_device *adev;

		if (andd->header.type != ACPI_DMAR_TYPE_ANDD)
			continue;

		if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
						  andd->object_name, &h))) {
			pr_err("Failed to find handle for ACPI object %s\n",
			       andd->object_name);
			continue;
		}
		acpi_bus_get_device(h, &adev);
		if (!adev) {
			pr_err("Failed to get device for ACPI object %s\n",
			       andd->object_name);
			continue;
		}
		dmar_acpi_insert_dev_scope(andd->device_number, adev);
	}
| 685 | return 0; |
| 686 | } |
| 687 | |
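/*
 * Populate device scopes once the DMAR table has been parsed: first the
 * ACPI namespace devices from ANDD structures, then every PCI device
 * already enumerated, and finally a bus notifier for future hotplug.
 */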
| 688 | int __init dmar_dev_scope_init(void) |
| 689 | { |
| 690 | struct pci_dev *dev = NULL; |
| 691 | struct dmar_pci_notify_info *info; |
| 692 | |
| 693 | if (dmar_dev_scope_status != 1) |
| 694 | return dmar_dev_scope_status; |
| 695 | |
| 696 | dmar_acpi_dev_scope_init(); |
| 697 | |
| 698 | if (list_empty(&dmar_drhd_units)) { |
| 699 | dmar_dev_scope_status = -ENODEV; |
| 700 | } else { |
| 701 | dmar_dev_scope_status = 0; |
| 702 | |
| 703 | for_each_pci_dev(dev) { |
| 704 | if (dev->is_virtfn) |
| 705 | continue; |
| 706 | |
			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				/* drop the reference held by for_each_pci_dev() */
				pci_dev_put(dev);
				return dmar_dev_scope_status;
			}

			dmar_pci_bus_add_dev(info);
			dmar_free_pci_notify_info(info);
| 715 | } |
| 716 | |
| 717 | bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb); |
| 718 | } |
| 719 | |
| 720 | return dmar_dev_scope_status; |
| 721 | } |
| 722 | |
| 723 | |
| 724 | int __init dmar_table_init(void) |
| 725 | { |
| 726 | static int dmar_table_initialized; |
| 727 | int ret; |
| 728 | |
| 729 | if (dmar_table_initialized == 0) { |
| 730 | ret = parse_dmar_table(); |
| 731 | if (ret < 0) { |
| 732 | if (ret != -ENODEV) |
				pr_info("Failed to parse DMAR table\n");
| 734 | } else if (list_empty(&dmar_drhd_units)) { |
| 735 | pr_info("No DMAR devices found\n"); |
| 736 | ret = -ENODEV; |
| 737 | } |
| 738 | |
| 739 | if (ret < 0) |
| 740 | dmar_table_initialized = ret; |
| 741 | else |
| 742 | dmar_table_initialized = 1; |
| 743 | } |
| 744 | |
| 745 | return dmar_table_initialized < 0 ? dmar_table_initialized : 0; |
| 746 | } |
| 747 | |
| 748 | static void warn_invalid_dmar(u64 addr, const char *message) |
| 749 | { |
| 750 | WARN_TAINT_ONCE( |
| 751 | 1, TAINT_FIRMWARE_WORKAROUND, |
| 752 | "Your BIOS is broken; DMAR reported at address %llx%s!\n" |
| 753 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", |
| 754 | addr, message, |
| 755 | dmi_get_system_info(DMI_BIOS_VENDOR), |
| 756 | dmi_get_system_info(DMI_BIOS_VERSION), |
| 757 | dmi_get_system_info(DMI_PRODUCT_VERSION)); |
| 758 | } |
| 759 | |
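/*
 * Sanity-check every DRHD in the freshly detected DMAR table: the
 * register base must be non-zero and the capability registers must not
 * read back as all ones.  Returns 1 if the table looks usable, 0
 * otherwise.
 */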
| 760 | static int __init check_zero_address(void) |
| 761 | { |
| 762 | struct acpi_table_dmar *dmar; |
| 763 | struct acpi_dmar_header *entry_header; |
| 764 | struct acpi_dmar_hardware_unit *drhd; |
| 765 | |
| 766 | dmar = (struct acpi_table_dmar *)dmar_tbl; |
| 767 | entry_header = (struct acpi_dmar_header *)(dmar + 1); |
| 768 | |
| 769 | while (((unsigned long)entry_header) < |
| 770 | (((unsigned long)dmar) + dmar_tbl->length)) { |
| 771 | /* Avoid looping forever on bad ACPI tables */ |
| 772 | if (entry_header->length == 0) { |
| 773 | pr_warn("Invalid 0-length structure\n"); |
| 774 | return 0; |
| 775 | } |
| 776 | |
| 777 | if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { |
| 778 | void __iomem *addr; |
| 779 | u64 cap, ecap; |
| 780 | |
| 781 | drhd = (void *)entry_header; |
| 782 | if (!drhd->address) { |
| 783 | warn_invalid_dmar(0, ""); |
| 784 | goto failed; |
| 785 | } |
| 786 | |
| 787 | addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); |
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
| 791 | } |
| 792 | cap = dmar_readq(addr + DMAR_CAP_REG); |
| 793 | ecap = dmar_readq(addr + DMAR_ECAP_REG); |
| 794 | early_iounmap(addr, VTD_PAGE_SIZE); |
| 795 | if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { |
| 796 | warn_invalid_dmar(drhd->address, |
| 797 | " returns all ones"); |
| 798 | goto failed; |
| 799 | } |
| 800 | } |
| 801 | |
| 802 | entry_header = ((void *)entry_header + entry_header->length); |
| 803 | } |
| 804 | return 1; |
| 805 | |
| 806 | failed: |
| 807 | return 0; |
| 808 | } |
| 809 | |
| 810 | int __init detect_intel_iommu(void) |
| 811 | { |
| 812 | int ret; |
| 813 | |
| 814 | down_write(&dmar_global_lock); |
| 815 | ret = dmar_table_detect(); |
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif
| 830 | early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); |
| 831 | dmar_tbl = NULL; |
| 832 | up_write(&dmar_global_lock); |
| 833 | |
| 834 | return ret ? 1 : -ENODEV; |
| 835 | } |
| 836 | |
| 837 | |
| 838 | static void unmap_iommu(struct intel_iommu *iommu) |
| 839 | { |
| 840 | iounmap(iommu->reg); |
| 841 | release_mem_region(iommu->reg_phys, iommu->reg_size); |
| 842 | } |
| 843 | |
| 844 | /** |
| 845 | * map_iommu: map the iommu's registers |
| 846 | * @iommu: the iommu to map |
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start with a single page, and
 * possibly expand if that turns out to be insufficient.
| 851 | */ |
| 852 | static int map_iommu(struct intel_iommu *iommu, u64 phys_addr) |
| 853 | { |
	int map_size, err = 0;
| 855 | |
| 856 | iommu->reg_phys = phys_addr; |
| 857 | iommu->reg_size = VTD_PAGE_SIZE; |
| 858 | |
| 859 | if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) { |
| 860 | pr_err("IOMMU: can't reserve memory\n"); |
| 861 | err = -EBUSY; |
| 862 | goto out; |
| 863 | } |
| 864 | |
| 865 | iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); |
| 866 | if (!iommu->reg) { |
| 867 | pr_err("IOMMU: can't map the region\n"); |
| 868 | err = -ENOMEM; |
| 869 | goto release; |
| 870 | } |
| 871 | |
| 872 | iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); |
| 873 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); |
| 874 | |
| 875 | if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { |
| 876 | err = -EINVAL; |
| 877 | warn_invalid_dmar(phys_addr, " returns all ones"); |
| 878 | goto unmap; |
| 879 | } |
| 880 | |
| 881 | /* the registers might be more than one page */ |
| 882 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), |
| 883 | cap_max_fault_reg_offset(iommu->cap)); |
| 884 | map_size = VTD_PAGE_ALIGN(map_size); |
| 885 | if (map_size > iommu->reg_size) { |
| 886 | iounmap(iommu->reg); |
| 887 | release_mem_region(iommu->reg_phys, iommu->reg_size); |
| 888 | iommu->reg_size = map_size; |
| 889 | if (!request_mem_region(iommu->reg_phys, iommu->reg_size, |
| 890 | iommu->name)) { |
| 891 | pr_err("IOMMU: can't reserve memory\n"); |
| 892 | err = -EBUSY; |
| 893 | goto out; |
| 894 | } |
| 895 | iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); |
| 896 | if (!iommu->reg) { |
| 897 | pr_err("IOMMU: can't map the region\n"); |
| 898 | err = -ENOMEM; |
| 899 | goto release; |
| 900 | } |
| 901 | } |
| 902 | err = 0; |
| 903 | goto out; |
| 904 | |
| 905 | unmap: |
| 906 | iounmap(iommu->reg); |
| 907 | release: |
| 908 | release_mem_region(iommu->reg_phys, iommu->reg_size); |
| 909 | out: |
| 910 | return err; |
| 911 | } |
| 912 | |
| 913 | static int alloc_iommu(struct dmar_drhd_unit *drhd) |
| 914 | { |
| 915 | struct intel_iommu *iommu; |
| 916 | u32 ver, sts; |
| 917 | static int iommu_allocated = 0; |
| 918 | int agaw = 0; |
| 919 | int msagaw = 0; |
| 920 | int err; |
| 921 | |
| 922 | if (!drhd->reg_base_addr) { |
| 923 | warn_invalid_dmar(0, ""); |
| 924 | return -EINVAL; |
| 925 | } |
| 926 | |
| 927 | iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); |
| 928 | if (!iommu) |
| 929 | return -ENOMEM; |
| 930 | |
| 931 | iommu->seq_id = iommu_allocated++; |
	sprintf(iommu->name, "dmar%d", iommu->seq_id);
| 933 | |
| 934 | err = map_iommu(iommu, drhd->reg_base_addr); |
| 935 | if (err) { |
| 936 | pr_err("IOMMU: failed to map %s\n", iommu->name); |
| 937 | goto error; |
| 938 | } |
| 939 | |
| 940 | err = -EINVAL; |
| 941 | agaw = iommu_calculate_agaw(iommu); |
| 942 | if (agaw < 0) { |
| 943 | pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", |
| 944 | iommu->seq_id); |
| 945 | goto err_unmap; |
| 946 | } |
| 947 | msagaw = iommu_calculate_max_sagaw(iommu); |
| 948 | if (msagaw < 0) { |
| 949 | pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", |
| 950 | iommu->seq_id); |
| 951 | goto err_unmap; |
| 952 | } |
| 953 | iommu->agaw = agaw; |
| 954 | iommu->msagaw = msagaw; |
| 955 | |
| 956 | iommu->node = -1; |
| 957 | |
| 958 | ver = readl(iommu->reg + DMAR_VER_REG); |
| 959 | pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n", |
| 960 | iommu->seq_id, |
| 961 | (unsigned long long)drhd->reg_base_addr, |
| 962 | DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), |
| 963 | (unsigned long long)iommu->cap, |
| 964 | (unsigned long long)iommu->ecap); |
| 965 | |
| 966 | /* Reflect status in gcmd */ |
| 967 | sts = readl(iommu->reg + DMAR_GSTS_REG); |
| 968 | if (sts & DMA_GSTS_IRES) |
| 969 | iommu->gcmd |= DMA_GCMD_IRE; |
| 970 | if (sts & DMA_GSTS_TES) |
| 971 | iommu->gcmd |= DMA_GCMD_TE; |
| 972 | if (sts & DMA_GSTS_QIES) |
| 973 | iommu->gcmd |= DMA_GCMD_QIE; |
| 974 | |
| 975 | raw_spin_lock_init(&iommu->register_lock); |
| 976 | |
| 977 | drhd->iommu = iommu; |
| 978 | return 0; |
| 979 | |
| 980 | err_unmap: |
| 981 | unmap_iommu(iommu); |
| 982 | error: |
| 983 | kfree(iommu); |
| 984 | return err; |
| 985 | } |
| 986 | |
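/*
 * Undo everything alloc_iommu() and later setup created: the fault
 * interrupt, the queued invalidation state and the register mapping.
 */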
| 987 | static void free_iommu(struct intel_iommu *iommu) |
| 988 | { |
| 989 | if (iommu->irq) { |
| 990 | free_irq(iommu->irq, iommu); |
| 991 | irq_set_handler_data(iommu->irq, NULL); |
| 992 | destroy_irq(iommu->irq); |
| 993 | } |
| 994 | |
| 995 | if (iommu->qi) { |
| 996 | free_page((unsigned long)iommu->qi->desc); |
| 997 | kfree(iommu->qi->desc_status); |
| 998 | kfree(iommu->qi); |
| 999 | } |
| 1000 | |
| 1001 | if (iommu->reg) |
| 1002 | unmap_iommu(iommu); |
| 1003 | |
| 1004 | kfree(iommu); |
| 1005 | } |
| 1006 | |
| 1007 | /* |
 * Reclaim all the submitted descriptors which have completed their work.
| 1009 | */ |
| 1010 | static inline void reclaim_free_desc(struct q_inval *qi) |
| 1011 | { |
| 1012 | while (qi->desc_status[qi->free_tail] == QI_DONE || |
| 1013 | qi->desc_status[qi->free_tail] == QI_ABORT) { |
| 1014 | qi->desc_status[qi->free_tail] = QI_FREE; |
| 1015 | qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; |
| 1016 | qi->free_cnt++; |
| 1017 | } |
| 1018 | } |
| 1019 | |
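/*
 * Check the fault status register for errors affecting the descriptor
 * at @index.  Returns -EINVAL for an Invalid Queue Error (after
 * overwriting the bad descriptor with a benign wait descriptor),
 * -EAGAIN when an Invalidation Time-out Error aborted the request and
 * it must be resubmitted, and 0 otherwise.
 */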
| 1020 | static int qi_check_fault(struct intel_iommu *iommu, int index) |
| 1021 | { |
| 1022 | u32 fault; |
| 1023 | int head, tail; |
| 1024 | struct q_inval *qi = iommu->qi; |
| 1025 | int wait_index = (index + 1) % QI_LENGTH; |
| 1026 | |
| 1027 | if (qi->desc_status[wait_index] == QI_ABORT) |
| 1028 | return -EAGAIN; |
| 1029 | |
| 1030 | fault = readl(iommu->reg + DMAR_FSTS_REG); |
| 1031 | |
| 1032 | /* |
| 1033 | * If IQE happens, the head points to the descriptor associated |
| 1034 | * with the error. No new descriptors are fetched until the IQE |
| 1035 | * is cleared. |
| 1036 | */ |
| 1037 | if (fault & DMA_FSTS_IQE) { |
| 1038 | head = readl(iommu->reg + DMAR_IQH_REG); |
| 1039 | if ((head >> DMAR_IQ_SHIFT) == index) { |
| 1040 | pr_err("VT-d detected invalid descriptor: " |
| 1041 | "low=%llx, high=%llx\n", |
| 1042 | (unsigned long long)qi->desc[index].low, |
| 1043 | (unsigned long long)qi->desc[index].high); |
| 1044 | memcpy(&qi->desc[index], &qi->desc[wait_index], |
| 1045 | sizeof(struct qi_desc)); |
| 1046 | __iommu_flush_cache(iommu, &qi->desc[index], |
| 1047 | sizeof(struct qi_desc)); |
| 1048 | writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); |
| 1049 | return -EINVAL; |
| 1050 | } |
| 1051 | } |
| 1052 | |
| 1053 | /* |
| 1054 | * If ITE happens, all pending wait_desc commands are aborted. |
| 1055 | * No new descriptors are fetched until the ITE is cleared. |
| 1056 | */ |
| 1057 | if (fault & DMA_FSTS_ITE) { |
| 1058 | head = readl(iommu->reg + DMAR_IQH_REG); |
| 1059 | head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH; |
| 1060 | head |= 1; |
| 1061 | tail = readl(iommu->reg + DMAR_IQT_REG); |
| 1062 | tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH; |
| 1063 | |
| 1064 | writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); |
| 1065 | |
| 1066 | do { |
| 1067 | if (qi->desc_status[head] == QI_IN_USE) |
| 1068 | qi->desc_status[head] = QI_ABORT; |
| 1069 | head = (head - 2 + QI_LENGTH) % QI_LENGTH; |
| 1070 | } while (head != tail); |
| 1071 | |
| 1072 | if (qi->desc_status[wait_index] == QI_ABORT) |
| 1073 | return -EAGAIN; |
| 1074 | } |
| 1075 | |
| 1076 | if (fault & DMA_FSTS_ICE) |
| 1077 | writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); |
| 1078 | |
| 1079 | return 0; |
| 1080 | } |
| 1081 | |
| 1082 | /* |
| 1083 | * Submit the queued invalidation descriptor to the remapping |
| 1084 | * hardware unit and wait for its completion. |
| 1085 | */ |
| 1086 | int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) |
| 1087 | { |
| 1088 | int rc; |
| 1089 | struct q_inval *qi = iommu->qi; |
| 1090 | struct qi_desc *hw, wait_desc; |
| 1091 | int wait_index, index; |
| 1092 | unsigned long flags; |
| 1093 | |
| 1094 | if (!qi) |
| 1095 | return 0; |
| 1096 | |
| 1097 | hw = qi->desc; |
| 1098 | |
| 1099 | restart: |
| 1100 | rc = 0; |
| 1101 | |
| 1102 | raw_spin_lock_irqsave(&qi->q_lock, flags); |
| 1103 | while (qi->free_cnt < 3) { |
| 1104 | raw_spin_unlock_irqrestore(&qi->q_lock, flags); |
| 1105 | cpu_relax(); |
| 1106 | raw_spin_lock_irqsave(&qi->q_lock, flags); |
| 1107 | } |
| 1108 | |
| 1109 | index = qi->free_head; |
| 1110 | wait_index = (index + 1) % QI_LENGTH; |
| 1111 | |
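	/*
	 * Each submission occupies two slots: the caller's descriptor at
	 * index and a wait descriptor at wait_index whose status
	 * write-back signals completion.
	 */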
| 1112 | qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE; |
| 1113 | |
| 1114 | hw[index] = *desc; |
| 1115 | |
| 1116 | wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) | |
| 1117 | QI_IWD_STATUS_WRITE | QI_IWD_TYPE; |
| 1118 | wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]); |
| 1119 | |
| 1120 | hw[wait_index] = wait_desc; |
| 1121 | |
| 1122 | __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); |
| 1123 | __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); |
| 1124 | |
| 1125 | qi->free_head = (qi->free_head + 2) % QI_LENGTH; |
| 1126 | qi->free_cnt -= 2; |
| 1127 | |
| 1128 | /* |
| 1129 | * update the HW tail register indicating the presence of |
| 1130 | * new descriptors. |
| 1131 | */ |
| 1132 | writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG); |
| 1133 | |
| 1134 | while (qi->desc_status[wait_index] != QI_DONE) { |
| 1135 | /* |
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is to
		 * avoid a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
| 1141 | */ |
| 1142 | rc = qi_check_fault(iommu, index); |
| 1143 | if (rc) |
| 1144 | break; |
| 1145 | |
| 1146 | raw_spin_unlock(&qi->q_lock); |
| 1147 | cpu_relax(); |
| 1148 | raw_spin_lock(&qi->q_lock); |
| 1149 | } |
| 1150 | |
| 1151 | qi->desc_status[index] = QI_DONE; |
| 1152 | |
| 1153 | reclaim_free_desc(qi); |
| 1154 | raw_spin_unlock_irqrestore(&qi->q_lock, flags); |
| 1155 | |
| 1156 | if (rc == -EAGAIN) |
| 1157 | goto restart; |
| 1158 | |
| 1159 | return rc; |
| 1160 | } |
| 1161 | |
| 1162 | /* |
| 1163 | * Flush the global interrupt entry cache. |
| 1164 | */ |
| 1165 | void qi_global_iec(struct intel_iommu *iommu) |
| 1166 | { |
| 1167 | struct qi_desc desc; |
| 1168 | |
| 1169 | desc.low = QI_IEC_TYPE; |
| 1170 | desc.high = 0; |
| 1171 | |
| 1172 | /* should never fail */ |
| 1173 | qi_submit_sync(&desc, iommu); |
| 1174 | } |
| 1175 | |
| 1176 | void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, |
| 1177 | u64 type) |
| 1178 | { |
| 1179 | struct qi_desc desc; |
| 1180 | |
| 1181 | desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) |
| 1182 | | QI_CC_GRAN(type) | QI_CC_TYPE; |
| 1183 | desc.high = 0; |
| 1184 | |
| 1185 | qi_submit_sync(&desc, iommu); |
| 1186 | } |
| 1187 | |
| 1188 | void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, |
| 1189 | unsigned int size_order, u64 type) |
| 1190 | { |
| 1191 | u8 dw = 0, dr = 0; |
| 1192 | |
| 1193 | struct qi_desc desc; |
| 1194 | int ih = 0; |
| 1195 | |
| 1196 | if (cap_write_drain(iommu->cap)) |
| 1197 | dw = 1; |
| 1198 | |
| 1199 | if (cap_read_drain(iommu->cap)) |
| 1200 | dr = 1; |
| 1201 | |
| 1202 | desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) |
| 1203 | | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; |
| 1204 | desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) |
| 1205 | | QI_IOTLB_AM(size_order); |
| 1206 | |
| 1207 | qi_submit_sync(&desc, iommu); |
| 1208 | } |
| 1209 | |
| 1210 | void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, |
| 1211 | u64 addr, unsigned mask) |
| 1212 | { |
| 1213 | struct qi_desc desc; |
| 1214 | |
| 1215 | if (mask) { |
| 1216 | BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1)); |
| 1217 | addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1; |
| 1218 | desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; |
| 1219 | } else |
| 1220 | desc.high = QI_DEV_IOTLB_ADDR(addr); |
| 1221 | |
| 1222 | if (qdep >= QI_DEV_IOTLB_MAX_INVS) |
| 1223 | qdep = 0; |
| 1224 | |
| 1225 | desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) | |
| 1226 | QI_DIOTLB_TYPE; |
| 1227 | |
| 1228 | qi_submit_sync(&desc, iommu); |
| 1229 | } |
| 1230 | |
| 1231 | /* |
| 1232 | * Disable Queued Invalidation interface. |
| 1233 | */ |
| 1234 | void dmar_disable_qi(struct intel_iommu *iommu) |
| 1235 | { |
| 1236 | unsigned long flags; |
| 1237 | u32 sts; |
| 1238 | cycles_t start_time = get_cycles(); |
| 1239 | |
| 1240 | if (!ecap_qis(iommu->ecap)) |
| 1241 | return; |
| 1242 | |
| 1243 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 1244 | |
| 1245 | sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); |
| 1246 | if (!(sts & DMA_GSTS_QIES)) |
| 1247 | goto end; |
| 1248 | |
| 1249 | /* |
| 1250 | * Give a chance to HW to complete the pending invalidation requests. |
| 1251 | */ |
| 1252 | while ((readl(iommu->reg + DMAR_IQT_REG) != |
| 1253 | readl(iommu->reg + DMAR_IQH_REG)) && |
| 1254 | (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time))) |
| 1255 | cpu_relax(); |
| 1256 | |
| 1257 | iommu->gcmd &= ~DMA_GCMD_QIE; |
| 1258 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
| 1259 | |
| 1260 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, |
| 1261 | !(sts & DMA_GSTS_QIES), sts); |
| 1262 | end: |
| 1263 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 1264 | } |
| 1265 | |
| 1266 | /* |
| 1267 | * Enable queued invalidation. |
| 1268 | */ |
| 1269 | static void __dmar_enable_qi(struct intel_iommu *iommu) |
| 1270 | { |
| 1271 | u32 sts; |
| 1272 | unsigned long flags; |
| 1273 | struct q_inval *qi = iommu->qi; |
| 1274 | |
| 1275 | qi->free_head = qi->free_tail = 0; |
| 1276 | qi->free_cnt = QI_LENGTH; |
| 1277 | |
| 1278 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 1279 | |
| 1280 | /* write zero to the tail reg */ |
| 1281 | writel(0, iommu->reg + DMAR_IQT_REG); |
| 1282 | |
| 1283 | dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); |
| 1284 | |
| 1285 | iommu->gcmd |= DMA_GCMD_QIE; |
| 1286 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
| 1287 | |
	/* Make sure hardware completes it */
| 1289 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); |
| 1290 | |
| 1291 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 1292 | } |
| 1293 | |
| 1294 | /* |
| 1295 | * Enable Queued Invalidation interface. This is a must to support |
| 1296 | * interrupt-remapping. Also used by DMA-remapping, which replaces |
| 1297 | * register based IOTLB invalidation. |
| 1298 | */ |
| 1299 | int dmar_enable_qi(struct intel_iommu *iommu) |
| 1300 | { |
| 1301 | struct q_inval *qi; |
| 1302 | struct page *desc_page; |
| 1303 | |
| 1304 | if (!ecap_qis(iommu->ecap)) |
| 1305 | return -ENOENT; |
| 1306 | |
| 1307 | /* |
	 * Queued invalidation is already set up and enabled.
| 1309 | */ |
| 1310 | if (iommu->qi) |
| 1311 | return 0; |
| 1312 | |
| 1313 | iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); |
| 1314 | if (!iommu->qi) |
| 1315 | return -ENOMEM; |
| 1316 | |
	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
| 1321 | if (!desc_page) { |
| 1322 | kfree(qi); |
| 1323 | iommu->qi = NULL; |
| 1324 | return -ENOMEM; |
| 1325 | } |
| 1326 | |
| 1327 | qi->desc = page_address(desc_page); |
| 1328 | |
| 1329 | qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC); |
| 1330 | if (!qi->desc_status) { |
| 1331 | free_page((unsigned long) qi->desc); |
| 1332 | kfree(qi); |
| 1333 | iommu->qi = NULL; |
| 1334 | return -ENOMEM; |
| 1335 | } |
| 1336 | |
| 1337 | qi->free_head = qi->free_tail = 0; |
| 1338 | qi->free_cnt = QI_LENGTH; |
| 1339 | |
| 1340 | raw_spin_lock_init(&qi->q_lock); |
| 1341 | |
| 1342 | __dmar_enable_qi(iommu); |
| 1343 | |
| 1344 | return 0; |
| 1345 | } |
| 1346 | |
/* iommu interrupt handling. Most of it is MSI-like. */
| 1348 | |
| 1349 | enum faulttype { |
| 1350 | DMA_REMAP, |
| 1351 | INTR_REMAP, |
| 1352 | UNKNOWN, |
| 1353 | }; |
| 1354 | |
| 1355 | static const char *dma_remap_fault_reasons[] = |
| 1356 | { |
| 1357 | "Software", |
| 1358 | "Present bit in root entry is clear", |
| 1359 | "Present bit in context entry is clear", |
| 1360 | "Invalid context entry", |
| 1361 | "Access beyond MGAW", |
| 1362 | "PTE Write access is not set", |
| 1363 | "PTE Read access is not set", |
| 1364 | "Next page table ptr is invalid", |
| 1365 | "Root table address invalid", |
| 1366 | "Context table ptr is invalid", |
| 1367 | "non-zero reserved fields in RTP", |
| 1368 | "non-zero reserved fields in CTP", |
| 1369 | "non-zero reserved fields in PTE", |
| 1370 | "PCE for translation request specifies blocking", |
| 1371 | }; |
| 1372 | |
| 1373 | static const char *irq_remap_fault_reasons[] = |
| 1374 | { |
| 1375 | "Detected reserved fields in the decoded interrupt-remapped request", |
| 1376 | "Interrupt index exceeded the interrupt-remapping table size", |
| 1377 | "Present field in the IRTE entry is clear", |
| 1378 | "Error accessing interrupt-remapping table pointed by IRTA_REG", |
| 1379 | "Detected reserved fields in the IRTE entry", |
| 1380 | "Blocked a compatibility format interrupt request", |
| 1381 | "Blocked an interrupt request due to source-id verification failure", |
| 1382 | }; |
| 1383 | |
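/*
 * Translate a hardware fault reason code into a human-readable string;
 * codes 0x20 and up are interrupt-remapping faults, lower codes are DMA
 * remapping faults.
 */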
| 1384 | static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) |
| 1385 | { |
| 1386 | if (fault_reason >= 0x20 && (fault_reason - 0x20 < |
| 1387 | ARRAY_SIZE(irq_remap_fault_reasons))) { |
| 1388 | *fault_type = INTR_REMAP; |
| 1389 | return irq_remap_fault_reasons[fault_reason - 0x20]; |
| 1390 | } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) { |
| 1391 | *fault_type = DMA_REMAP; |
| 1392 | return dma_remap_fault_reasons[fault_reason]; |
| 1393 | } else { |
| 1394 | *fault_type = UNKNOWN; |
| 1395 | return "Unknown"; |
| 1396 | } |
| 1397 | } |
| 1398 | |
| 1399 | void dmar_msi_unmask(struct irq_data *data) |
| 1400 | { |
| 1401 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
| 1402 | unsigned long flag; |
| 1403 | |
| 1404 | /* unmask it */ |
| 1405 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1406 | writel(0, iommu->reg + DMAR_FECTL_REG); |
	/* Read a reg to force flush the posted write */
| 1408 | readl(iommu->reg + DMAR_FECTL_REG); |
| 1409 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1410 | } |
| 1411 | |
| 1412 | void dmar_msi_mask(struct irq_data *data) |
| 1413 | { |
| 1414 | unsigned long flag; |
| 1415 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
| 1416 | |
| 1417 | /* mask it */ |
| 1418 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1419 | writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); |
	/* Read a reg to force flush the posted write */
| 1421 | readl(iommu->reg + DMAR_FECTL_REG); |
| 1422 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1423 | } |
| 1424 | |
| 1425 | void dmar_msi_write(int irq, struct msi_msg *msg) |
| 1426 | { |
| 1427 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
| 1428 | unsigned long flag; |
| 1429 | |
| 1430 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1431 | writel(msg->data, iommu->reg + DMAR_FEDATA_REG); |
| 1432 | writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); |
| 1433 | writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); |
| 1434 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1435 | } |
| 1436 | |
| 1437 | void dmar_msi_read(int irq, struct msi_msg *msg) |
| 1438 | { |
| 1439 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
| 1440 | unsigned long flag; |
| 1441 | |
| 1442 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1443 | msg->data = readl(iommu->reg + DMAR_FEDATA_REG); |
| 1444 | msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); |
| 1445 | msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); |
| 1446 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1447 | } |
| 1448 | |
| 1449 | static int dmar_fault_do_one(struct intel_iommu *iommu, int type, |
| 1450 | u8 fault_reason, u16 source_id, unsigned long long addr) |
| 1451 | { |
| 1452 | const char *reason; |
| 1453 | int fault_type; |
| 1454 | |
| 1455 | reason = dmar_get_fault_reason(fault_reason, &fault_type); |
| 1456 | |
| 1457 | if (fault_type == INTR_REMAP) |
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
| 1459 | "fault index %llx\n" |
| 1460 | "INTR-REMAP:[fault reason %02d] %s\n", |
| 1461 | (source_id >> 8), PCI_SLOT(source_id & 0xFF), |
| 1462 | PCI_FUNC(source_id & 0xFF), addr >> 48, |
| 1463 | fault_reason, reason); |
| 1464 | else |
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
| 1467 | "DMAR:[fault reason %02d] %s\n", |
| 1468 | (type ? "DMA Read" : "DMA Write"), |
| 1469 | (source_id >> 8), PCI_SLOT(source_id & 0xFF), |
| 1470 | PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason); |
| 1471 | return 0; |
| 1472 | } |
| 1473 | |
| 1474 | #define PRIMARY_FAULT_REG_LEN (16) |
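/*
 * Primary fault interrupt handler: starting at the index reported in
 * the fault status register, log and clear each pending fault record,
 * then acknowledge the overflow and pending-fault status bits.
 */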
| 1475 | irqreturn_t dmar_fault(int irq, void *dev_id) |
| 1476 | { |
| 1477 | struct intel_iommu *iommu = dev_id; |
| 1478 | int reg, fault_index; |
| 1479 | u32 fault_status; |
| 1480 | unsigned long flag; |
| 1481 | |
| 1482 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1483 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); |
| 1484 | if (fault_status) |
| 1485 | pr_err("DRHD: handling fault status reg %x\n", fault_status); |
| 1486 | |
| 1487 | /* TBD: ignore advanced fault log currently */ |
| 1488 | if (!(fault_status & DMA_FSTS_PPF)) |
| 1489 | goto unlock_exit; |
| 1490 | |
| 1491 | fault_index = dma_fsts_fault_record_index(fault_status); |
| 1492 | reg = cap_fault_reg_offset(iommu->cap); |
| 1493 | while (1) { |
| 1494 | u8 fault_reason; |
| 1495 | u16 source_id; |
| 1496 | u64 guest_addr; |
| 1497 | int type; |
| 1498 | u32 data; |
| 1499 | |
| 1500 | /* highest 32 bits */ |
| 1501 | data = readl(iommu->reg + reg + |
| 1502 | fault_index * PRIMARY_FAULT_REG_LEN + 12); |
| 1503 | if (!(data & DMA_FRCD_F)) |
| 1504 | break; |
| 1505 | |
| 1506 | fault_reason = dma_frcd_fault_reason(data); |
| 1507 | type = dma_frcd_type(data); |
| 1508 | |
| 1509 | data = readl(iommu->reg + reg + |
| 1510 | fault_index * PRIMARY_FAULT_REG_LEN + 8); |
| 1511 | source_id = dma_frcd_source_id(data); |
| 1512 | |
| 1513 | guest_addr = dmar_readq(iommu->reg + reg + |
| 1514 | fault_index * PRIMARY_FAULT_REG_LEN); |
| 1515 | guest_addr = dma_frcd_page_addr(guest_addr); |
| 1516 | /* clear the fault */ |
| 1517 | writel(DMA_FRCD_F, iommu->reg + reg + |
| 1518 | fault_index * PRIMARY_FAULT_REG_LEN + 12); |
| 1519 | |
| 1520 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1521 | |
| 1522 | dmar_fault_do_one(iommu, type, fault_reason, |
| 1523 | source_id, guest_addr); |
| 1524 | |
| 1525 | fault_index++; |
| 1526 | if (fault_index >= cap_num_fault_regs(iommu->cap)) |
| 1527 | fault_index = 0; |
| 1528 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1529 | } |
| 1530 | |
| 1531 | writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG); |
| 1532 | |
| 1533 | unlock_exit: |
| 1534 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1535 | return IRQ_HANDLED; |
| 1536 | } |
| 1537 | |
| 1538 | int dmar_set_interrupt(struct intel_iommu *iommu) |
| 1539 | { |
| 1540 | int irq, ret; |
| 1541 | |
| 1542 | /* |
| 1543 | * Check if the fault interrupt is already initialized. |
| 1544 | */ |
| 1545 | if (iommu->irq) |
| 1546 | return 0; |
| 1547 | |
| 1548 | irq = create_irq(); |
| 1549 | if (!irq) { |
| 1550 | pr_err("IOMMU: no free vectors\n"); |
| 1551 | return -EINVAL; |
| 1552 | } |
| 1553 | |
| 1554 | irq_set_handler_data(irq, iommu); |
| 1555 | iommu->irq = irq; |
| 1556 | |
| 1557 | ret = arch_setup_dmar_msi(irq); |
| 1558 | if (ret) { |
| 1559 | irq_set_handler_data(irq, NULL); |
| 1560 | iommu->irq = 0; |
| 1561 | destroy_irq(irq); |
| 1562 | return ret; |
| 1563 | } |
| 1564 | |
| 1565 | ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); |
| 1566 | if (ret) |
| 1567 | pr_err("IOMMU: can't request irq\n"); |
| 1568 | return ret; |
| 1569 | } |
| 1570 | |
| 1571 | int __init enable_drhd_fault_handling(void) |
| 1572 | { |
| 1573 | struct dmar_drhd_unit *drhd; |
| 1574 | struct intel_iommu *iommu; |
| 1575 | |
| 1576 | /* |
| 1577 | * Enable fault control interrupt. |
| 1578 | */ |
| 1579 | for_each_iommu(iommu, drhd) { |
| 1580 | u32 fault_status; |
| 1581 | int ret = dmar_set_interrupt(iommu); |
| 1582 | |
| 1583 | if (ret) { |
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
| 1585 | (unsigned long long)drhd->reg_base_addr, ret); |
| 1586 | return -1; |
| 1587 | } |
| 1588 | |
| 1589 | /* |
| 1590 | * Clear any previous faults. |
| 1591 | */ |
| 1592 | dmar_fault(iommu->irq, iommu); |
| 1593 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); |
| 1594 | writel(fault_status, iommu->reg + DMAR_FSTS_REG); |
| 1595 | } |
| 1596 | |
| 1597 | return 0; |
| 1598 | } |
| 1599 | |
| 1600 | /* |
| 1601 | * Re-enable Queued Invalidation interface. |
| 1602 | */ |
| 1603 | int dmar_reenable_qi(struct intel_iommu *iommu) |
| 1604 | { |
| 1605 | if (!ecap_qis(iommu->ecap)) |
| 1606 | return -ENOENT; |
| 1607 | |
| 1608 | if (!iommu->qi) |
| 1609 | return -ENOENT; |
| 1610 | |
| 1611 | /* |
| 1612 | * First disable queued invalidation. |
| 1613 | */ |
| 1614 | dmar_disable_qi(iommu); |
| 1615 | /* |
| 1616 | * Then enable queued invalidation again. Since there is no pending |
| 1617 | * invalidation requests now, it's safe to re-enable queued |
| 1618 | * invalidation. |
| 1619 | */ |
| 1620 | __dmar_enable_qi(iommu); |
| 1621 | |
| 1622 | return 0; |
| 1623 | } |
| 1624 | |
| 1625 | /* |
| 1626 | * Check interrupt remapping support in DMAR table description. |
| 1627 | */ |
| 1628 | int __init dmar_ir_support(void) |
| 1629 | { |
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
| 1632 | if (!dmar) |
| 1633 | return 0; |
| 1634 | return dmar->flags & 0x1; |
| 1635 | } |
| 1636 | |
| 1637 | static int __init dmar_free_unused_resources(void) |
| 1638 | { |
| 1639 | struct dmar_drhd_unit *dmaru, *dmaru_n; |
| 1640 | |
| 1641 | /* DMAR units are in use */ |
| 1642 | if (irq_remapping_enabled || intel_iommu_enabled) |
| 1643 | return 0; |
| 1644 | |
| 1645 | if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) |
| 1646 | bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb); |
| 1647 | |
| 1648 | down_write(&dmar_global_lock); |
| 1649 | list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) { |
| 1650 | list_del(&dmaru->list); |
| 1651 | dmar_free_drhd(dmaru); |
| 1652 | } |
| 1653 | up_write(&dmar_global_lock); |
| 1654 | |
| 1655 | return 0; |
| 1656 | } |
| 1657 | |
| 1658 | late_initcall(dmar_free_unused_resources); |
| 1659 | IOMMU_INIT_POST(detect_intel_iommu); |