iommu/vt-d: Fixed interaction of VFIO_IOMMU_MAP_DMA with IOMMU address limits
[deliverable/linux.git] / drivers/iommu/dmar.c
10e5247f
KA
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
98bcef56 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
10e5247f 21 *
e61d98d8 22 * This file implements early detection/parsing of Remapping Devices
10e5247f
KA
23 * reported to the OS through the BIOS via the DMA remapping reporting (DMAR) ACPI
24 * tables.
e61d98d8
SS
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
10e5247f
KA
27 */
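/*
 * Note on the table layout parsed below: the ACPI DMAR table starts with a
 * struct acpi_table_dmar header (host address width, flags), followed by a
 * sequence of variable-length remapping structures, each beginning with a
 * struct acpi_dmar_header {type, length}: DRHD, RMRR, ATSR or RHSA.
 */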
28
e9071b0b
DD
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
10e5247f
KA
31#include <linux/pci.h>
32#include <linux/dmar.h>
38717946
KA
33#include <linux/iova.h>
34#include <linux/intel-iommu.h>
fe962e90 35#include <linux/timer.h>
0ac2491f
SS
36#include <linux/irq.h>
37#include <linux/interrupt.h>
69575d38 38#include <linux/tboot.h>
eb27cae8 39#include <linux/dmi.h>
5a0e3ad6 40#include <linux/slab.h>
8a8f422d 41#include <asm/irq_remapping.h>
4db77ff3 42#include <asm/iommu_table.h>
10e5247f 43
078e1ee2
JR
44#include "irq_remapping.h"
45
10e5247f
KA
46/* No locks are needed as DMA remapping hardware unit
47 * list is constructed at boot time and hotplug of
48 * these units is not supported by the architecture.
49 */
50LIST_HEAD(dmar_drhd_units);
10e5247f 51
41750d31 52struct acpi_table_header * __initdata dmar_tbl;
8e1568f3 53static acpi_size dmar_tbl_size;
10e5247f
KA
54
55static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
56{
57 /*
58 * add INCLUDE_ALL at the tail, so scanning the list will find it at
59 * the very end.
60 */
61 if (drhd->include_all)
62 list_add_tail(&drhd->list, &dmar_drhd_units);
63 else
64 list_add(&drhd->list, &dmar_drhd_units);
65}
66
10e5247f
KA
67static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
68 struct pci_dev **dev, u16 segment)
69{
70 struct pci_bus *bus;
71 struct pci_dev *pdev = NULL;
72 struct acpi_dmar_pci_path *path;
73 int count;
74
75 bus = pci_find_bus(segment, scope->bus);
76 path = (struct acpi_dmar_pci_path *)(scope + 1);
77 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
78 / sizeof(struct acpi_dmar_pci_path);
79
80 while (count) {
81 if (pdev)
82 pci_dev_put(pdev);
83 /*
84 * Some BIOSes list non-existent devices in the DMAR table; just
85 * ignore them.
86 */
87 if (!bus) {
e9071b0b 88 pr_warn("Device scope bus [%d] not found\n", scope->bus);
10e5247f
KA
89 break;
90 }
91 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
92 if (!pdev) {
e9071b0b 93 /* warning will be printed below */
10e5247f
KA
94 break;
95 }
96 path++;
97 count--;
98 bus = pdev->subordinate;
99 }
100 if (!pdev) {
e9071b0b 101 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
bf947fcb 102 segment, scope->bus, path->dev, path->fn);
10e5247f
KA
103 *dev = NULL;
104 return 0;
105 }
106 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
107 pdev->subordinate) || (scope->entry_type ==
108 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
109 pci_dev_put(pdev);
e9071b0b
DD
110 pr_warn("Device scope type does not match for %s\n",
111 pci_name(pdev));
10e5247f
KA
112 return -EINVAL;
113 }
114 *dev = pdev;
115 return 0;
116}
117
318fe7df
SS
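/*
 * dmar_parse_dev_scope() makes two passes over the device scope entries:
 * the first pass counts PCI endpoint/bridge entries so the pci_dev array
 * can be sized, the second resolves each entry via dmar_parse_one_dev_scope().
 */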
118int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
119 struct pci_dev ***devices, u16 segment)
10e5247f
KA
120{
121 struct acpi_dmar_device_scope *scope;
122 void *tmp = start;
123 int index;
124 int ret;
125
126 *cnt = 0;
127 while (start < end) {
128 scope = start;
129 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
130 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
131 (*cnt)++;
ae3e7f3a
LC
132 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
133 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
e9071b0b 134 pr_warn("Unsupported device scope\n");
5715f0f9 135 }
10e5247f
KA
136 start += scope->length;
137 }
138 if (*cnt == 0)
139 return 0;
140
141 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
142 if (!*devices)
143 return -ENOMEM;
144
145 start = tmp;
146 index = 0;
147 while (start < end) {
148 scope = start;
149 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
150 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
151 ret = dmar_parse_one_dev_scope(scope,
152 &(*devices)[index], segment);
153 if (ret) {
154 kfree(*devices);
155 return ret;
156 }
157 index ++;
158 }
159 start += scope->length;
160 }
161
162 return 0;
163}
164
165/**
166 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
167 * structure which uniquely represents one DMA remapping hardware unit
168 * present in the platform
169 */
170static int __init
171dmar_parse_one_drhd(struct acpi_dmar_header *header)
172{
173 struct acpi_dmar_hardware_unit *drhd;
174 struct dmar_drhd_unit *dmaru;
175 int ret = 0;
10e5247f 176
e523b38e 177 drhd = (struct acpi_dmar_hardware_unit *)header;
10e5247f
KA
178 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
179 if (!dmaru)
180 return -ENOMEM;
181
1886e8a9 182 dmaru->hdr = header;
10e5247f 183 dmaru->reg_base_addr = drhd->address;
276dbf99 184 dmaru->segment = drhd->segment;
10e5247f
KA
185 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
186
1886e8a9
SS
187 ret = alloc_iommu(dmaru);
188 if (ret) {
189 kfree(dmaru);
190 return ret;
191 }
192 dmar_register_drhd_unit(dmaru);
193 return 0;
194}
195
f82851a8 196static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
1886e8a9
SS
197{
198 struct acpi_dmar_hardware_unit *drhd;
f82851a8 199 int ret = 0;
1886e8a9
SS
200
201 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
202
2e824f79
YZ
203 if (dmaru->include_all)
204 return 0;
205
206 ret = dmar_parse_dev_scope((void *)(drhd + 1),
1886e8a9 207 ((void *)drhd) + drhd->header.length,
10e5247f
KA
208 &dmaru->devices_cnt, &dmaru->devices,
209 drhd->segment);
1c7d1bca 210 if (ret) {
1886e8a9 211 list_del(&dmaru->list);
10e5247f 212 kfree(dmaru);
1886e8a9 213 }
10e5247f
KA
214 return ret;
215}
216
aa697079 217#ifdef CONFIG_ACPI_NUMA
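/*
 * An RHSA structure associates a remapping hardware unit (identified by its
 * register base address) with an ACPI proximity domain, so that the matching
 * DRHD's iommu->node can be set for node-local allocations.
 */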
ee34b32d
SS
218static int __init
219dmar_parse_one_rhsa(struct acpi_dmar_header *header)
220{
221 struct acpi_dmar_rhsa *rhsa;
222 struct dmar_drhd_unit *drhd;
223
224 rhsa = (struct acpi_dmar_rhsa *)header;
aa697079 225 for_each_drhd_unit(drhd) {
ee34b32d
SS
226 if (drhd->reg_base_addr == rhsa->base_address) {
227 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
228
229 if (!node_online(node))
230 node = -1;
231 drhd->iommu->node = node;
aa697079
DW
232 return 0;
233 }
ee34b32d 234 }
fd0c8894
BH
235 WARN_TAINT(
236 1, TAINT_FIRMWARE_WORKAROUND,
237 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
238 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
239 drhd->reg_base_addr,
240 dmi_get_system_info(DMI_BIOS_VENDOR),
241 dmi_get_system_info(DMI_BIOS_VERSION),
242 dmi_get_system_info(DMI_PRODUCT_VERSION));
ee34b32d 243
aa697079 244 return 0;
ee34b32d 245}
aa697079 246#endif
ee34b32d 247
10e5247f
KA
248static void __init
249dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
250{
251 struct acpi_dmar_hardware_unit *drhd;
252 struct acpi_dmar_reserved_memory *rmrr;
aa5d2b51 253 struct acpi_dmar_atsr *atsr;
17b60977 254 struct acpi_dmar_rhsa *rhsa;
10e5247f
KA
255
256 switch (header->type) {
257 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
aa5d2b51
YZ
258 drhd = container_of(header, struct acpi_dmar_hardware_unit,
259 header);
e9071b0b 260 pr_info("DRHD base: %#016Lx flags: %#x\n",
aa5d2b51 261 (unsigned long long)drhd->address, drhd->flags);
10e5247f
KA
262 break;
263 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
aa5d2b51
YZ
264 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
265 header);
e9071b0b 266 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
5b6985ce
FY
267 (unsigned long long)rmrr->base_address,
268 (unsigned long long)rmrr->end_address);
10e5247f 269 break;
aa5d2b51
YZ
270 case ACPI_DMAR_TYPE_ATSR:
271 atsr = container_of(header, struct acpi_dmar_atsr, header);
e9071b0b 272 pr_info("ATSR flags: %#x\n", atsr->flags);
aa5d2b51 273 break;
17b60977
RD
274 case ACPI_DMAR_HARDWARE_AFFINITY:
275 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
e9071b0b 276 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
17b60977
RD
277 (unsigned long long)rhsa->base_address,
278 rhsa->proximity_domain);
279 break;
10e5247f
KA
280 }
281}
282
f6dd5c31
YL
283/**
284 * dmar_table_detect - checks to see if the platform supports DMAR devices
285 */
286static int __init dmar_table_detect(void)
287{
288 acpi_status status = AE_OK;
289
290 /* if we could find DMAR table, then there are DMAR devices */
8e1568f3
YL
291 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
292 (struct acpi_table_header **)&dmar_tbl,
293 &dmar_tbl_size);
f6dd5c31
YL
294
295 if (ACPI_SUCCESS(status) && !dmar_tbl) {
e9071b0b 296 pr_warn("Unable to map DMAR\n");
f6dd5c31
YL
297 status = AE_NOT_FOUND;
298 }
299
300 return (ACPI_SUCCESS(status) ? 1 : 0);
301}
aaa9d1dd 302
10e5247f
KA
303/**
304 * parse_dmar_table - parses the DMA reporting table
305 */
306static int __init
307parse_dmar_table(void)
308{
309 struct acpi_table_dmar *dmar;
310 struct acpi_dmar_header *entry_header;
311 int ret = 0;
7cef3347 312 int drhd_count = 0;
10e5247f 313
f6dd5c31
YL
314 /*
315 * Do it again; the earlier dmar_tbl mapping could have been set up
316 * with the fixed map.
317 */
318 dmar_table_detect();
319
a59b50e9
JC
320 /*
321 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
322 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
323 */
324 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
325
10e5247f
KA
326 dmar = (struct acpi_table_dmar *)dmar_tbl;
327 if (!dmar)
328 return -ENODEV;
329
5b6985ce 330 if (dmar->width < PAGE_SHIFT - 1) {
e9071b0b 331 pr_warn("Invalid DMAR haw\n");
10e5247f
KA
332 return -EINVAL;
333 }
334
e9071b0b 335 pr_info("Host address width %d\n", dmar->width + 1);
10e5247f
KA
336
337 entry_header = (struct acpi_dmar_header *)(dmar + 1);
338 while (((unsigned long)entry_header) <
339 (((unsigned long)dmar) + dmar_tbl->length)) {
084eb960
TB
340 /* Avoid looping forever on bad ACPI tables */
341 if (entry_header->length == 0) {
e9071b0b 342 pr_warn("Invalid 0-length structure\n");
084eb960
TB
343 ret = -EINVAL;
344 break;
345 }
346
10e5247f
KA
347 dmar_table_print_dmar_entry(entry_header);
348
349 switch (entry_header->type) {
350 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
7cef3347 351 drhd_count++;
10e5247f
KA
352 ret = dmar_parse_one_drhd(entry_header);
353 break;
354 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
355 ret = dmar_parse_one_rmrr(entry_header);
aa5d2b51
YZ
356 break;
357 case ACPI_DMAR_TYPE_ATSR:
aa5d2b51 358 ret = dmar_parse_one_atsr(entry_header);
10e5247f 359 break;
17b60977 360 case ACPI_DMAR_HARDWARE_AFFINITY:
aa697079 361#ifdef CONFIG_ACPI_NUMA
ee34b32d 362 ret = dmar_parse_one_rhsa(entry_header);
aa697079 363#endif
17b60977 364 break;
10e5247f 365 default:
e9071b0b 366 pr_warn("Unknown DMAR structure type %d\n",
4de75cf9 367 entry_header->type);
10e5247f
KA
368 ret = 0; /* for forward compatibility */
369 break;
370 }
371 if (ret)
372 break;
373
374 entry_header = ((void *)entry_header + entry_header->length);
375 }
7cef3347
LZH
376 if (drhd_count == 0)
377 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
10e5247f
KA
378 return ret;
379}
380
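/*
 * dmar_pci_device_match() also walks up through the device's parent bridges
 * (dev->bus->self), so a device behind a bridge that is listed in a DRHD
 * device scope still matches that unit.
 */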
dda56549 381static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
e61d98d8
SS
382 struct pci_dev *dev)
383{
384 int index;
385
386 while (dev) {
387 for (index = 0; index < cnt; index++)
388 if (dev == devices[index])
389 return 1;
390
391 /* Check our parent */
392 dev = dev->bus->self;
393 }
394
395 return 0;
396}
397
398struct dmar_drhd_unit *
399dmar_find_matched_drhd_unit(struct pci_dev *dev)
400{
2e824f79
YZ
401 struct dmar_drhd_unit *dmaru = NULL;
402 struct acpi_dmar_hardware_unit *drhd;
403
dda56549
Y
404 dev = pci_physfn(dev);
405
2e824f79
YZ
406 list_for_each_entry(dmaru, &dmar_drhd_units, list) {
407 drhd = container_of(dmaru->hdr,
408 struct acpi_dmar_hardware_unit,
409 header);
410
411 if (dmaru->include_all &&
412 drhd->segment == pci_domain_nr(dev->bus))
413 return dmaru;
e61d98d8 414
2e824f79
YZ
415 if (dmar_pci_device_match(dmaru->devices,
416 dmaru->devices_cnt, dev))
417 return dmaru;
e61d98d8
SS
418 }
419
420 return NULL;
421}
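/*
 * Illustrative use only (callers live elsewhere, e.g. the DMA and interrupt
 * remapping setup paths):
 *
 *	drhd = dmar_find_matched_drhd_unit(pdev);
 *	if (drhd)
 *		iommu = drhd->iommu;
 *
 * i.e. map a pci_dev to the IOMMU covering it, falling back to the
 * INCLUDE_ALL unit of the device's PCI segment.
 */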
422
1886e8a9
SS
423int __init dmar_dev_scope_init(void)
424{
c2c7286a 425 static int dmar_dev_scope_initialized;
04e2ea67 426 struct dmar_drhd_unit *drhd, *drhd_n;
1886e8a9
SS
427 int ret = -ENODEV;
428
c2c7286a
SS
429 if (dmar_dev_scope_initialized)
430 return dmar_dev_scope_initialized;
431
318fe7df
SS
432 if (list_empty(&dmar_drhd_units))
433 goto fail;
434
04e2ea67 435 list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
1886e8a9
SS
436 ret = dmar_parse_dev(drhd);
437 if (ret)
c2c7286a 438 goto fail;
1886e8a9
SS
439 }
440
318fe7df
SS
441 ret = dmar_parse_rmrr_atsr_dev();
442 if (ret)
443 goto fail;
1886e8a9 444
c2c7286a
SS
445 dmar_dev_scope_initialized = 1;
446 return 0;
447
448fail:
449 dmar_dev_scope_initialized = ret;
1886e8a9
SS
450 return ret;
451}
452
10e5247f
KA
453
454int __init dmar_table_init(void)
455{
1886e8a9 456 static int dmar_table_initialized;
093f87d2
FY
457 int ret;
458
1886e8a9
SS
459 if (dmar_table_initialized)
460 return 0;
461
462 dmar_table_initialized = 1;
463
093f87d2
FY
464 ret = parse_dmar_table();
465 if (ret) {
1886e8a9 466 if (ret != -ENODEV)
e9071b0b 467 pr_info("Failed to parse DMAR table\n");
093f87d2
FY
468 return ret;
469 }
470
10e5247f 471 if (list_empty(&dmar_drhd_units)) {
e9071b0b 472 pr_info("No DMAR devices found\n");
10e5247f
KA
473 return -ENODEV;
474 }
093f87d2 475
10e5247f
KA
476 return 0;
477}
478
3a8663ee
BH
479static void warn_invalid_dmar(u64 addr, const char *message)
480{
fd0c8894
BH
481 WARN_TAINT_ONCE(
482 1, TAINT_FIRMWARE_WORKAROUND,
483 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
484 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
485 addr, message,
486 dmi_get_system_info(DMI_BIOS_VENDOR),
487 dmi_get_system_info(DMI_BIOS_VERSION),
488 dmi_get_system_info(DMI_PRODUCT_VERSION));
3a8663ee 489}
6ecbf01c 490
86cf898e
DW
491int __init check_zero_address(void)
492{
493 struct acpi_table_dmar *dmar;
494 struct acpi_dmar_header *entry_header;
495 struct acpi_dmar_hardware_unit *drhd;
496
497 dmar = (struct acpi_table_dmar *)dmar_tbl;
498 entry_header = (struct acpi_dmar_header *)(dmar + 1);
499
500 while (((unsigned long)entry_header) <
501 (((unsigned long)dmar) + dmar_tbl->length)) {
502 /* Avoid looping forever on bad ACPI tables */
503 if (entry_header->length == 0) {
e9071b0b 504 pr_warn("Invalid 0-length structure\n");
86cf898e
DW
505 return 0;
506 }
507
508 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
2c992208
CW
509 void __iomem *addr;
510 u64 cap, ecap;
511
86cf898e
DW
512 drhd = (void *)entry_header;
513 if (!drhd->address) {
3a8663ee 514 warn_invalid_dmar(0, "");
2c992208
CW
515 goto failed;
516 }
517
518 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
519 if (!addr) {
520 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
521 goto failed;
522 }
523 cap = dmar_readq(addr + DMAR_CAP_REG);
524 ecap = dmar_readq(addr + DMAR_ECAP_REG);
525 early_iounmap(addr, VTD_PAGE_SIZE);
526 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
3a8663ee
BH
527 warn_invalid_dmar(drhd->address,
528 " returns all ones");
2c992208 529 goto failed;
86cf898e 530 }
86cf898e
DW
531 }
532
533 entry_header = ((void *)entry_header + entry_header->length);
534 }
535 return 1;
2c992208
CW
536
537failed:
2c992208 538 return 0;
86cf898e
DW
539}
540
480125ba 541int __init detect_intel_iommu(void)
2ae21010
SS
542{
543 int ret;
544
f6dd5c31 545 ret = dmar_table_detect();
86cf898e
DW
546 if (ret)
547 ret = check_zero_address();
2ae21010 548 {
1cb11583 549 struct acpi_table_dmar *dmar;
b3a530e4 550
1cb11583 551 dmar = (struct acpi_table_dmar *) dmar_tbl;
f5d1b97b 552
95a02e97 553 if (ret && irq_remapping_enabled && cpu_has_x2apic &&
f5d1b97b 554 dmar->flags & 0x1)
e9071b0b 555 pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
f5d1b97b 556
11bd04f6 557 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
2ae21010 558 iommu_detected = 1;
5d990b62
CW
559 /* Make sure ACS will be enabled */
560 pci_request_acs();
561 }
f5d1b97b 562
9d5ce73a
FT
563#ifdef CONFIG_X86
564 if (ret)
565 x86_init.iommu.iommu_init = intel_iommu_init;
2ae21010 566#endif
cacd4213 567 }
8e1568f3 568 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
f6dd5c31 569 dmar_tbl = NULL;
480125ba 570
4db77ff3 571 return ret ? 1 : -ENODEV;
2ae21010
SS
572}
573
574
6f5cf521
DD
575static void unmap_iommu(struct intel_iommu *iommu)
576{
577 iounmap(iommu->reg);
578 release_mem_region(iommu->reg_phys, iommu->reg_size);
579}
580
581/**
582 * map_iommu: map the iommu's registers
583 * @iommu: the iommu to map
584 * @phys_addr: the physical address of the base register
e9071b0b 585 *
6f5cf521 586 * Memory map the iommu's registers. Start w/ a single page, and
e9071b0b 587 * possibly expand if that turns out to be insufficient.
6f5cf521
DD
588 */
589static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
590{
591 int map_size, err=0;
592
593 iommu->reg_phys = phys_addr;
594 iommu->reg_size = VTD_PAGE_SIZE;
595
596 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
597 pr_err("IOMMU: can't reserve memory\n");
598 err = -EBUSY;
599 goto out;
600 }
601
602 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
603 if (!iommu->reg) {
604 pr_err("IOMMU: can't map the region\n");
605 err = -ENOMEM;
606 goto release;
607 }
608
609 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
610 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
611
612 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
613 err = -EINVAL;
614 warn_invalid_dmar(phys_addr, " returns all ones");
615 goto unmap;
616 }
617
618 /* the registers might be more than one page */
619 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
620 cap_max_fault_reg_offset(iommu->cap));
621 map_size = VTD_PAGE_ALIGN(map_size);
622 if (map_size > iommu->reg_size) {
623 iounmap(iommu->reg);
624 release_mem_region(iommu->reg_phys, iommu->reg_size);
625 iommu->reg_size = map_size;
626 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
627 iommu->name)) {
628 pr_err("IOMMU: can't reserve memory\n");
629 err = -EBUSY;
630 goto out;
631 }
632 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
633 if (!iommu->reg) {
634 pr_err("IOMMU: can't map the region\n");
635 err = -ENOMEM;
636 goto release;
637 }
638 }
639 err = 0;
640 goto out;
641
642unmap:
643 iounmap(iommu->reg);
644release:
645 release_mem_region(iommu->reg_phys, iommu->reg_size);
646out:
647 return err;
648}
649
1886e8a9 650int alloc_iommu(struct dmar_drhd_unit *drhd)
e61d98d8 651{
c42d9f32 652 struct intel_iommu *iommu;
3a93c841 653 u32 ver, sts;
c42d9f32 654 static int iommu_allocated = 0;
43f7392b 655 int agaw = 0;
4ed0d3e6 656 int msagaw = 0;
6f5cf521 657 int err;
c42d9f32 658
6ecbf01c 659 if (!drhd->reg_base_addr) {
3a8663ee 660 warn_invalid_dmar(0, "");
6ecbf01c
DW
661 return -EINVAL;
662 }
663
c42d9f32
SS
664 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
665 if (!iommu)
1886e8a9 666 return -ENOMEM;
c42d9f32
SS
667
668 iommu->seq_id = iommu_allocated++;
9d783ba0 669 sprintf(iommu->name, "dmar%d", iommu->seq_id);
e61d98d8 670
6f5cf521
DD
671 err = map_iommu(iommu, drhd->reg_base_addr);
672 if (err) {
673 pr_err("IOMMU: failed to map %s\n", iommu->name);
e61d98d8
SS
674 goto error;
675 }
0815565a 676
6f5cf521 677 err = -EINVAL;
1b573683
WH
678 agaw = iommu_calculate_agaw(iommu);
679 if (agaw < 0) {
bf947fcb
DD
680 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
681 iommu->seq_id);
0815565a 682 goto err_unmap;
4ed0d3e6
FY
683 }
684 msagaw = iommu_calculate_max_sagaw(iommu);
685 if (msagaw < 0) {
bf947fcb 686 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1b573683 687 iommu->seq_id);
0815565a 688 goto err_unmap;
1b573683
WH
689 }
690 iommu->agaw = agaw;
4ed0d3e6 691 iommu->msagaw = msagaw;
1b573683 692
ee34b32d
SS
693 iommu->node = -1;
694
e61d98d8 695 ver = readl(iommu->reg + DMAR_VER_REG);
680a7524
YL
696 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
697 iommu->seq_id,
5b6985ce
FY
698 (unsigned long long)drhd->reg_base_addr,
699 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
700 (unsigned long long)iommu->cap,
701 (unsigned long long)iommu->ecap);
e61d98d8 702
3a93c841
TI
703 /* Reflect status in gcmd */
704 sts = readl(iommu->reg + DMAR_GSTS_REG);
705 if (sts & DMA_GSTS_IRES)
706 iommu->gcmd |= DMA_GCMD_IRE;
707 if (sts & DMA_GSTS_TES)
708 iommu->gcmd |= DMA_GCMD_TE;
709 if (sts & DMA_GSTS_QIES)
710 iommu->gcmd |= DMA_GCMD_QIE;
711
1f5b3c3f 712 raw_spin_lock_init(&iommu->register_lock);
e61d98d8
SS
713
714 drhd->iommu = iommu;
1886e8a9 715 return 0;
0815565a
DW
716
717 err_unmap:
6f5cf521 718 unmap_iommu(iommu);
0815565a 719 error:
e61d98d8 720 kfree(iommu);
6f5cf521 721 return err;
e61d98d8
SS
722}
723
724void free_iommu(struct intel_iommu *iommu)
725{
726 if (!iommu)
727 return;
728
e61d98d8 729 free_dmar_iommu(iommu);
e61d98d8
SS
730
731 if (iommu->reg)
6f5cf521
DD
732 unmap_iommu(iommu);
733
e61d98d8
SS
734 kfree(iommu);
735}
fe962e90
SS
736
737/*
738 * Reclaim all the submitted descriptors which have completed their work.
739 */
740static inline void reclaim_free_desc(struct q_inval *qi)
741{
6ba6c3a4
YZ
742 while (qi->desc_status[qi->free_tail] == QI_DONE ||
743 qi->desc_status[qi->free_tail] == QI_ABORT) {
fe962e90
SS
744 qi->desc_status[qi->free_tail] = QI_FREE;
745 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
746 qi->free_cnt++;
747 }
748}
749
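/*
 * qi_check_fault() looks at DMAR_FSTS_REG for queued invalidation errors.
 * On an Invalidation Queue Error (IQE) hitting the descriptor at @index it
 * replaces the bad descriptor with the wait descriptor and returns -EINVAL;
 * on an Invalidation Time-out Error (ITE) the pending wait descriptors are
 * marked QI_ABORT so that qi_submit_sync() restarts the submission (-EAGAIN).
 */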
704126ad
YZ
750static int qi_check_fault(struct intel_iommu *iommu, int index)
751{
752 u32 fault;
6ba6c3a4 753 int head, tail;
704126ad
YZ
754 struct q_inval *qi = iommu->qi;
755 int wait_index = (index + 1) % QI_LENGTH;
756
6ba6c3a4
YZ
757 if (qi->desc_status[wait_index] == QI_ABORT)
758 return -EAGAIN;
759
704126ad
YZ
760 fault = readl(iommu->reg + DMAR_FSTS_REG);
761
762 /*
763 * If IQE happens, the head points to the descriptor associated
764 * with the error. No new descriptors are fetched until the IQE
765 * is cleared.
766 */
767 if (fault & DMA_FSTS_IQE) {
768 head = readl(iommu->reg + DMAR_IQH_REG);
6ba6c3a4 769 if ((head >> DMAR_IQ_SHIFT) == index) {
bf947fcb 770 pr_err("VT-d detected invalid descriptor: "
6ba6c3a4
YZ
771 "low=%llx, high=%llx\n",
772 (unsigned long long)qi->desc[index].low,
773 (unsigned long long)qi->desc[index].high);
704126ad
YZ
774 memcpy(&qi->desc[index], &qi->desc[wait_index],
775 sizeof(struct qi_desc));
776 __iommu_flush_cache(iommu, &qi->desc[index],
777 sizeof(struct qi_desc));
778 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
779 return -EINVAL;
780 }
781 }
782
6ba6c3a4
YZ
783 /*
784 * If ITE happens, all pending wait_desc commands are aborted.
785 * No new descriptors are fetched until the ITE is cleared.
786 */
787 if (fault & DMA_FSTS_ITE) {
788 head = readl(iommu->reg + DMAR_IQH_REG);
789 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
790 head |= 1;
791 tail = readl(iommu->reg + DMAR_IQT_REG);
792 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
793
794 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
795
796 do {
797 if (qi->desc_status[head] == QI_IN_USE)
798 qi->desc_status[head] = QI_ABORT;
799 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
800 } while (head != tail);
801
802 if (qi->desc_status[wait_index] == QI_ABORT)
803 return -EAGAIN;
804 }
805
806 if (fault & DMA_FSTS_ICE)
807 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
808
704126ad
YZ
809 return 0;
810}
811
fe962e90
SS
812/*
813 * Submit the queued invalidation descriptor to the remapping
814 * hardware unit and wait for its completion.
815 */
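/*
 * Each submission consumes two queue slots: the caller's descriptor plus a
 * wait descriptor whose status write to qi->desc_status[wait_index] signals
 * completion. qi_check_fault() is polled while spinning so hardware-reported
 * errors either fail the request or cause it to be restarted.
 */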
704126ad 816int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
fe962e90 817{
6ba6c3a4 818 int rc;
fe962e90
SS
819 struct q_inval *qi = iommu->qi;
820 struct qi_desc *hw, wait_desc;
821 int wait_index, index;
822 unsigned long flags;
823
824 if (!qi)
704126ad 825 return 0;
fe962e90
SS
826
827 hw = qi->desc;
828
6ba6c3a4
YZ
829restart:
830 rc = 0;
831
3b8f4048 832 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90 833 while (qi->free_cnt < 3) {
3b8f4048 834 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
fe962e90 835 cpu_relax();
3b8f4048 836 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90
SS
837 }
838
839 index = qi->free_head;
840 wait_index = (index + 1) % QI_LENGTH;
841
842 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
843
844 hw[index] = *desc;
845
704126ad
YZ
846 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
847 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
fe962e90
SS
848 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
849
850 hw[wait_index] = wait_desc;
851
852 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
853 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
854
855 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
856 qi->free_cnt -= 2;
857
fe962e90
SS
858 /*
859 * update the HW tail register indicating the presence of
860 * new descriptors.
861 */
6ba6c3a4 862 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
fe962e90
SS
863
864 while (qi->desc_status[wait_index] != QI_DONE) {
f05810c9
SS
865 /*
866 * We will leave the interrupts disabled, to prevent the interrupt
867 * context from queueing another cmd while a cmd is already submitted
868 * and waiting for completion on this cpu. This is to avoid
869 * a deadlock where the interrupt context can wait indefinitely
870 * for free slots in the queue.
871 */
704126ad
YZ
872 rc = qi_check_fault(iommu, index);
873 if (rc)
6ba6c3a4 874 break;
704126ad 875
3b8f4048 876 raw_spin_unlock(&qi->q_lock);
fe962e90 877 cpu_relax();
3b8f4048 878 raw_spin_lock(&qi->q_lock);
fe962e90 879 }
6ba6c3a4
YZ
880
881 qi->desc_status[index] = QI_DONE;
fe962e90
SS
882
883 reclaim_free_desc(qi);
3b8f4048 884 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
704126ad 885
6ba6c3a4
YZ
886 if (rc == -EAGAIN)
887 goto restart;
888
704126ad 889 return rc;
fe962e90
SS
890}
891
892/*
893 * Flush the global interrupt entry cache.
894 */
895void qi_global_iec(struct intel_iommu *iommu)
896{
897 struct qi_desc desc;
898
899 desc.low = QI_IEC_TYPE;
900 desc.high = 0;
901
704126ad 902 /* should never fail */
fe962e90
SS
903 qi_submit_sync(&desc, iommu);
904}
905
4c25a2c1
DW
906void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
907 u64 type)
3481f210 908{
3481f210
YS
909 struct qi_desc desc;
910
3481f210
YS
911 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
912 | QI_CC_GRAN(type) | QI_CC_TYPE;
913 desc.high = 0;
914
4c25a2c1 915 qi_submit_sync(&desc, iommu);
3481f210
YS
916}
917
1f0ef2aa
DW
918void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
919 unsigned int size_order, u64 type)
3481f210
YS
920{
921 u8 dw = 0, dr = 0;
922
923 struct qi_desc desc;
924 int ih = 0;
925
3481f210
YS
926 if (cap_write_drain(iommu->cap))
927 dw = 1;
928
929 if (cap_read_drain(iommu->cap))
930 dr = 1;
931
932 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
933 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
934 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
935 | QI_IOTLB_AM(size_order);
936
1f0ef2aa 937 qi_submit_sync(&desc, iommu);
3481f210
YS
938}
939
6ba6c3a4
YZ
940void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
941 u64 addr, unsigned mask)
942{
943 struct qi_desc desc;
944
945 if (mask) {
946 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
947 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
948 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
949 } else
950 desc.high = QI_DEV_IOTLB_ADDR(addr);
951
952 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
953 qdep = 0;
954
955 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
956 QI_DIOTLB_TYPE;
957
958 qi_submit_sync(&desc, iommu);
959}
960
eba67e5d
SS
961/*
962 * Disable Queued Invalidation interface.
963 */
964void dmar_disable_qi(struct intel_iommu *iommu)
965{
966 unsigned long flags;
967 u32 sts;
968 cycles_t start_time = get_cycles();
969
970 if (!ecap_qis(iommu->ecap))
971 return;
972
1f5b3c3f 973 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eba67e5d
SS
974
975 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
976 if (!(sts & DMA_GSTS_QIES))
977 goto end;
978
979 /*
980 * Give the HW a chance to complete the pending invalidation requests.
981 */
982 while ((readl(iommu->reg + DMAR_IQT_REG) !=
983 readl(iommu->reg + DMAR_IQH_REG)) &&
984 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
985 cpu_relax();
986
987 iommu->gcmd &= ~DMA_GCMD_QIE;
eba67e5d
SS
988 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
989
990 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
991 !(sts & DMA_GSTS_QIES), sts);
992end:
1f5b3c3f 993 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eba67e5d
SS
994}
995
eb4a52bc
FY
996/*
997 * Enable queued invalidation.
998 */
999static void __dmar_enable_qi(struct intel_iommu *iommu)
1000{
c416daa9 1001 u32 sts;
eb4a52bc
FY
1002 unsigned long flags;
1003 struct q_inval *qi = iommu->qi;
1004
1005 qi->free_head = qi->free_tail = 0;
1006 qi->free_cnt = QI_LENGTH;
1007
1f5b3c3f 1008 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eb4a52bc
FY
1009
1010 /* write zero to the tail reg */
1011 writel(0, iommu->reg + DMAR_IQT_REG);
1012
1013 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1014
eb4a52bc 1015 iommu->gcmd |= DMA_GCMD_QIE;
c416daa9 1016 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
eb4a52bc
FY
1017
1018 /* Make sure hardware complete it */
1019 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1020
1f5b3c3f 1021 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eb4a52bc
FY
1022}
1023
fe962e90
SS
1024/*
1025 * Enable the Queued Invalidation interface. This is required to support
1026 * interrupt-remapping. Also used by DMA-remapping, which replaces
1027 * register based IOTLB invalidation.
1028 */
1029int dmar_enable_qi(struct intel_iommu *iommu)
1030{
fe962e90 1031 struct q_inval *qi;
751cafe3 1032 struct page *desc_page;
fe962e90
SS
1033
1034 if (!ecap_qis(iommu->ecap))
1035 return -ENOENT;
1036
1037 /*
1038 * queued invalidation is already set up and enabled.
1039 */
1040 if (iommu->qi)
1041 return 0;
1042
fa4b57cc 1043 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
fe962e90
SS
1044 if (!iommu->qi)
1045 return -ENOMEM;
1046
1047 qi = iommu->qi;
1048
751cafe3
SS
1049
1050 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1051 if (!desc_page) {
fe962e90
SS
1052 kfree(qi);
1053 iommu->qi = NULL;
1054 return -ENOMEM;
1055 }
1056
751cafe3
SS
1057 qi->desc = page_address(desc_page);
1058
37a40710 1059 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
fe962e90
SS
1060 if (!qi->desc_status) {
1061 free_page((unsigned long) qi->desc);
1062 kfree(qi);
1063 iommu->qi = NULL;
1064 return -ENOMEM;
1065 }
1066
1067 qi->free_head = qi->free_tail = 0;
1068 qi->free_cnt = QI_LENGTH;
1069
3b8f4048 1070 raw_spin_lock_init(&qi->q_lock);
fe962e90 1071
eb4a52bc 1072 __dmar_enable_qi(iommu);
fe962e90
SS
1073
1074 return 0;
1075}
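/*
 * Illustrative only: once dmar_enable_qi() has succeeded, invalidation
 * requests go through qi_submit_sync(), either directly (as in
 * qi_global_iec()) or via the qi_flush_context()/qi_flush_iotlb()/
 * qi_flush_dev_iotlb() helpers above.
 */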
0ac2491f
SS
1076
1077/* iommu interrupt handling. Most stuff is MSI-like. */
1078
9d783ba0
SS
1079enum faulttype {
1080 DMA_REMAP,
1081 INTR_REMAP,
1082 UNKNOWN,
1083};
1084
1085static const char *dma_remap_fault_reasons[] =
0ac2491f
SS
1086{
1087 "Software",
1088 "Present bit in root entry is clear",
1089 "Present bit in context entry is clear",
1090 "Invalid context entry",
1091 "Access beyond MGAW",
1092 "PTE Write access is not set",
1093 "PTE Read access is not set",
1094 "Next page table ptr is invalid",
1095 "Root table address invalid",
1096 "Context table ptr is invalid",
1097 "non-zero reserved fields in RTP",
1098 "non-zero reserved fields in CTP",
1099 "non-zero reserved fields in PTE",
4ecccd9e 1100 "PCE for translation request specifies blocking",
0ac2491f 1101};
9d783ba0 1102
95a02e97 1103static const char *irq_remap_fault_reasons[] =
9d783ba0
SS
1104{
1105 "Detected reserved fields in the decoded interrupt-remapped request",
1106 "Interrupt index exceeded the interrupt-remapping table size",
1107 "Present field in the IRTE entry is clear",
1108 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1109 "Detected reserved fields in the IRTE entry",
1110 "Blocked a compatibility format interrupt request",
1111 "Blocked an interrupt request due to source-id verification failure",
1112};
1113
0ac2491f
SS
1114#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
1115
9d783ba0 1116const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f 1117{
fefe1ed1
DC
1118 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1119 ARRAY_SIZE(irq_remap_fault_reasons))) {
9d783ba0 1120 *fault_type = INTR_REMAP;
95a02e97 1121 return irq_remap_fault_reasons[fault_reason - 0x20];
9d783ba0
SS
1122 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1123 *fault_type = DMA_REMAP;
1124 return dma_remap_fault_reasons[fault_reason];
1125 } else {
1126 *fault_type = UNKNOWN;
0ac2491f 1127 return "Unknown";
9d783ba0 1128 }
0ac2491f
SS
1129}
1130
5c2837fb 1131void dmar_msi_unmask(struct irq_data *data)
0ac2491f 1132{
dced35ae 1133 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
0ac2491f
SS
1134 unsigned long flag;
1135
1136 /* unmask it */
1f5b3c3f 1137 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1138 writel(0, iommu->reg + DMAR_FECTL_REG);
1139 /* Read a reg to force flush the post write */
1140 readl(iommu->reg + DMAR_FECTL_REG);
1f5b3c3f 1141 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1142}
1143
5c2837fb 1144void dmar_msi_mask(struct irq_data *data)
0ac2491f
SS
1145{
1146 unsigned long flag;
dced35ae 1147 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
0ac2491f
SS
1148
1149 /* mask it */
1f5b3c3f 1150 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1151 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1152 /* Read a reg to force flush the post write */
1153 readl(iommu->reg + DMAR_FECTL_REG);
1f5b3c3f 1154 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1155}
1156
1157void dmar_msi_write(int irq, struct msi_msg *msg)
1158{
dced35ae 1159 struct intel_iommu *iommu = irq_get_handler_data(irq);
0ac2491f
SS
1160 unsigned long flag;
1161
1f5b3c3f 1162 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1163 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1164 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1165 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1f5b3c3f 1166 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1167}
1168
1169void dmar_msi_read(int irq, struct msi_msg *msg)
1170{
dced35ae 1171 struct intel_iommu *iommu = irq_get_handler_data(irq);
0ac2491f
SS
1172 unsigned long flag;
1173
1f5b3c3f 1174 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1175 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1176 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1177 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1f5b3c3f 1178 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1179}
1180
1181static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1182 u8 fault_reason, u16 source_id, unsigned long long addr)
1183{
1184 const char *reason;
9d783ba0 1185 int fault_type;
0ac2491f 1186
9d783ba0 1187 reason = dmar_get_fault_reason(fault_reason, &fault_type);
0ac2491f 1188
9d783ba0 1189 if (fault_type == INTR_REMAP)
bf947fcb 1190 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
9d783ba0
SS
1191 "fault index %llx\n"
1192 "INTR-REMAP:[fault reason %02d] %s\n",
1193 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1194 PCI_FUNC(source_id & 0xFF), addr >> 48,
1195 fault_reason, reason);
1196 else
bf947fcb 1197 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
9d783ba0
SS
1198 "fault addr %llx \n"
1199 "DMAR:[fault reason %02d] %s\n",
1200 (type ? "DMA Read" : "DMA Write"),
1201 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1202 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
0ac2491f
SS
1203 return 0;
1204}
1205
1206#define PRIMARY_FAULT_REG_LEN (16)
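/*
 * dmar_fault() walks the hardware fault recording registers: each record is
 * PRIMARY_FAULT_REG_LEN (16) bytes, with the top dword holding the F bit,
 * fault reason and request type, the dword below it the source-id, and the
 * low quadword the faulting page address. A record is cleared by writing
 * DMA_FRCD_F back, and the register lock is dropped while the fault is
 * logged via dmar_fault_do_one().
 */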
1531a6a6 1207irqreturn_t dmar_fault(int irq, void *dev_id)
0ac2491f
SS
1208{
1209 struct intel_iommu *iommu = dev_id;
1210 int reg, fault_index;
1211 u32 fault_status;
1212 unsigned long flag;
1213
1f5b3c3f 1214 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1215 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
9d783ba0 1216 if (fault_status)
bf947fcb 1217 pr_err("DRHD: handling fault status reg %x\n", fault_status);
0ac2491f
SS
1218
1219 /* TBD: ignore advanced fault log currently */
1220 if (!(fault_status & DMA_FSTS_PPF))
bd5cdad0 1221 goto unlock_exit;
0ac2491f
SS
1222
1223 fault_index = dma_fsts_fault_record_index(fault_status);
1224 reg = cap_fault_reg_offset(iommu->cap);
1225 while (1) {
1226 u8 fault_reason;
1227 u16 source_id;
1228 u64 guest_addr;
1229 int type;
1230 u32 data;
1231
1232 /* highest 32 bits */
1233 data = readl(iommu->reg + reg +
1234 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1235 if (!(data & DMA_FRCD_F))
1236 break;
1237
1238 fault_reason = dma_frcd_fault_reason(data);
1239 type = dma_frcd_type(data);
1240
1241 data = readl(iommu->reg + reg +
1242 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1243 source_id = dma_frcd_source_id(data);
1244
1245 guest_addr = dmar_readq(iommu->reg + reg +
1246 fault_index * PRIMARY_FAULT_REG_LEN);
1247 guest_addr = dma_frcd_page_addr(guest_addr);
1248 /* clear the fault */
1249 writel(DMA_FRCD_F, iommu->reg + reg +
1250 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1251
1f5b3c3f 1252 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1253
1254 dmar_fault_do_one(iommu, type, fault_reason,
1255 source_id, guest_addr);
1256
1257 fault_index++;
8211a7b5 1258 if (fault_index >= cap_num_fault_regs(iommu->cap))
0ac2491f 1259 fault_index = 0;
1f5b3c3f 1260 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1261 }
0ac2491f 1262
bd5cdad0
LZH
1263 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1264
1265unlock_exit:
1f5b3c3f 1266 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1267 return IRQ_HANDLED;
1268}
1269
1270int dmar_set_interrupt(struct intel_iommu *iommu)
1271{
1272 int irq, ret;
1273
9d783ba0
SS
1274 /*
1275 * Check if the fault interrupt is already initialized.
1276 */
1277 if (iommu->irq)
1278 return 0;
1279
0ac2491f
SS
1280 irq = create_irq();
1281 if (!irq) {
bf947fcb 1282 pr_err("IOMMU: no free vectors\n");
0ac2491f
SS
1283 return -EINVAL;
1284 }
1285
dced35ae 1286 irq_set_handler_data(irq, iommu);
0ac2491f
SS
1287 iommu->irq = irq;
1288
1289 ret = arch_setup_dmar_msi(irq);
1290 if (ret) {
dced35ae 1291 irq_set_handler_data(irq, NULL);
0ac2491f
SS
1292 iommu->irq = 0;
1293 destroy_irq(irq);
dd726435 1294 return ret;
0ac2491f
SS
1295 }
1296
477694e7 1297 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
0ac2491f 1298 if (ret)
bf947fcb 1299 pr_err("IOMMU: can't request irq\n");
0ac2491f
SS
1300 return ret;
1301}
9d783ba0
SS
1302
1303int __init enable_drhd_fault_handling(void)
1304{
1305 struct dmar_drhd_unit *drhd;
1306
1307 /*
1308 * Enable fault control interrupt.
1309 */
1310 for_each_drhd_unit(drhd) {
1311 int ret;
1312 struct intel_iommu *iommu = drhd->iommu;
bd5cdad0 1313 u32 fault_status;
9d783ba0
SS
1314 ret = dmar_set_interrupt(iommu);
1315
1316 if (ret) {
e9071b0b 1317 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
9d783ba0
SS
1318 (unsigned long long)drhd->reg_base_addr, ret);
1319 return -1;
1320 }
7f99d946
SS
1321
1322 /*
1323 * Clear any previous faults.
1324 */
1325 dmar_fault(iommu->irq, iommu);
bd5cdad0
LZH
1326 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1327 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
9d783ba0
SS
1328 }
1329
1330 return 0;
1331}
eb4a52bc
FY
1332
1333/*
1334 * Re-enable Queued Invalidation interface.
1335 */
1336int dmar_reenable_qi(struct intel_iommu *iommu)
1337{
1338 if (!ecap_qis(iommu->ecap))
1339 return -ENOENT;
1340
1341 if (!iommu->qi)
1342 return -ENOENT;
1343
1344 /*
1345 * First disable queued invalidation.
1346 */
1347 dmar_disable_qi(iommu);
1348 /*
1349 * Then enable queued invalidation again. Since there is no pending
1350 * invalidation requests now, it's safe to re-enable queued
1351 * invalidation.
1352 */
1353 __dmar_enable_qi(iommu);
1354
1355 return 0;
1356}
074835f0
YS
1357
1358/*
1359 * Check interrupt remapping support in DMAR table description.
1360 */
0b8973a8 1361int __init dmar_ir_support(void)
074835f0
YS
1362{
1363 struct acpi_table_dmar *dmar;
1364 dmar = (struct acpi_table_dmar *)dmar_tbl;
4f506e07
AP
1365 if (!dmar)
1366 return 0;
074835f0
YS
1367 return dmar->flags & 0x1;
1368}
4db77ff3 1369IOMMU_INIT_POST(detect_intel_iommu);