drivers/iommu/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/iommu_table.h>

#define PREFIX "DMAR: "

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units are not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

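/*
 * Resolve a single ACPI device-scope entry to a pci_dev by walking its
 * PCI path on the given segment.  A scope naming a device that does not
 * exist is skipped (*dev set to NULL, return 0); a scope whose type
 * (endpoint vs. bridge) does not match the device found returns -EINVAL.
 */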
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-exist devices in DMAR table, just
		 * ignore it
		 */
		if (!bus) {
			printk(KERN_WARNING
			PREFIX "Device scope bus [%d] not found\n",
			scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path ++;
		count --;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
	    pdev->subordinate) || (scope->entry_type == \
	    ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		pci_dev_put(pdev);
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			 pci_name(pdev));
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

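/*
 * Parse a device-scope range in two passes: first count the endpoint and
 * bridge entries, then allocate the pci_dev pointer array and resolve
 * each entry with dmar_parse_one_dev_scope().
 */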
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void * tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index ++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represent one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
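/*
 * Remapping Hardware Static Affinity (RHSA) structures associate a DRHD
 * with a proximity domain; record the resulting NUMA node in the matching
 * IOMMU, or warn if the RHSA points at no known DRHD.
 */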
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk (KERN_INFO PREFIX
			"DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk (KERN_INFO PREFIX
			"RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk (KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

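/* Return 1 if dev, or any PCI bridge above it, is in the devices[] array. */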
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

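/*
 * Find the DRHD unit that covers a device: either an INCLUDE_ALL unit on
 * the device's segment, or a unit whose device scope explicitly lists the
 * device (or one of its parent bridges).
 */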
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

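/*
 * Second-stage initialization: resolve the device scopes of all DRHD,
 * RMRR and ATSR structures to pci_dev pointers.  The result of the first
 * call is cached and returned on subsequent calls.
 */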
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

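/*
 * Sanity-check every DRHD in the table before use: a zero register base
 * address or capability registers that read back as all ones indicate
 * broken firmware, in which case the table is rejected.
 */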
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr ) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

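/*
 * Early detection entry point: map the DMAR table, validate it, and if a
 * usable unit exists arrange for intel_iommu_init() to run later.  The
 * early table mapping is dropped again before returning.
 */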
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *) dmar_tbl;

		if (ret && intr_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}


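/*
 * Map the register set of one DRHD, read its capability and extended
 * capability registers, and derive the supported address widths.  On
 * success the new intel_iommu is attached to the drhd unit.
 */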
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf (iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed its work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

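/*
 * Check the fault status register while waiting for a queued-invalidation
 * wait descriptor to complete.  IQE restarts the queue with the wait
 * descriptor, ITE aborts all pending wait descriptors (-EAGAIN lets the
 * caller resubmit), and ICE is simply cleared.
 */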
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

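/*
 * Issue a device-IOTLB (ATS) invalidation for the given source-id,
 * covering 2^mask pages starting at addr.
 */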
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;


	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = 0;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = 0;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most stuff are MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

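/*
 * Accessors for the fault-event MSI registers; all take the register
 * lock so they can race safely with the fault handler.
 */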
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
		       "fault index %llx\n"
			"INTR-REMAP:[fault reason %02d] %s\n",
			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
			PCI_FUNC(source_id & 0xFF), addr >> 48,
			fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx \n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
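/*
 * Fault interrupt handler: walk the primary fault recording registers,
 * log each recorded fault via dmar_fault_do_one(), clear it, and finally
 * clear the overall fault status.
 */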
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

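/*
 * Allocate and wire up the fault-event interrupt for an IOMMU, routing
 * it to dmar_fault().
 */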
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
			       " interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there is no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);