drivers/iommu/dmar.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units are not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

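/*
 * Resolve one ACPI device-scope entry to a pci_dev.  The scope gives a
 * start bus and a path of (device, function) hops; each hop is looked up
 * with pci_get_slot() and the walk descends through the subordinate bus
 * of the bridge found at that hop.
 */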
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
					&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

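/*
 * Second-stage setup for a DRHD unit: resolve its device scope into an
 * array of pci_dev pointers.  INCLUDE_ALL units carry no explicit scope
 * and are skipped; on failure the unit is unlinked and freed.
 */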
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

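/*
 * Find the DRHD unit whose device scope covers @dev: a unit matches if the
 * device (or one of its parent bridges) appears in its scope, or if the
 * unit is marked INCLUDE_ALL for the device's PCI segment.  Virtual
 * functions are matched through their physical function.
 */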
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

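/*
 * Resolve device scopes for all DRHD units and for the RMRR/ATSR entries.
 * The body runs only once; the outcome is remembered in
 * dmar_dev_scope_initialized and handed back to subsequent callers.
 */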
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

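/*
 * Sanity-check every DRHD in the table before committing to VT-d: a unit
 * whose register base is zero, or whose CAP/ECAP registers read back as
 * all ones, indicates broken firmware.  Returns 1 when all units look
 * sane, 0 otherwise.
 */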
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

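/*
 * Early detection entry point: map and validate the DMAR table, flag the
 * presence of an IOMMU so ACS gets requested, and (on x86) install
 * intel_iommu_init as the IOMMU init hook.  The early table mapping is
 * dropped before returning; the table is mapped again when it is parsed.
 */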
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

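/*
 * Check the fault status register while a synchronous submission is
 * pending.  An Invalidation Queue Error (IQE) on our own descriptor is
 * patched over with the wait descriptor and reported as -EINVAL; an
 * Invalidation Time-out Error (ITE) aborts all pending wait descriptors
 * and is reported as -EAGAIN so the caller can resubmit.
 */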
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

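/*
 * Each synchronous submission actually queues two descriptors: the
 * caller's descriptor and a wait descriptor whose status write targets
 * qi->desc_status[wait_index].  Completion is detected by polling that
 * status slot rather than by interrupt.
 */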
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is to
		 * avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

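/*
 * Issue a device-IOTLB invalidation.  A non-zero @mask selects a
 * power-of-two range: the low address bits are filled with ones and the
 * size bit is set in the descriptor.  A queue depth at or above
 * QI_DEV_IOTLB_MAX_INVS is encoded as 0.
 */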
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

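/*
 * Primary fault interrupt handler: walk the fault recording registers
 * starting at the index reported in the fault status register, log and
 * clear each recorded fault, then acknowledge the overall fault status.
 */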
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		u32 fault_status;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);