/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#define PREFIX "DMAR: "

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING PREFIX
				"Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
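
/*
 * Illustrative note (an editorial sketch, not part of the original
 * sources): each device scope entry is a struct acpi_dmar_device_scope
 * immediately followed by a variable number of struct acpi_dmar_pci_path
 * entries, one per bridge hop.  For a hypothetical device behind one
 * bridge the walk above proceeds roughly as:
 *
 *	[scope: bus 0][path: dev 3, fn 0][path: dev 0, fn 0]
 *	pci_find_bus(seg, 0) -> pci_get_slot(bus, 03.0) ->
 *	follow pdev->subordinate -> pci_get_slot(subordinate bus, 00.0)
 */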

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
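
/*
 * Illustrative note (editorial): the two passes above are deliberate.
 * The first pass only counts ENDPOINT/BRIDGE entries so that devices[]
 * can be sized with a single kcalloc(); the second pass resolves each
 * entry to a struct pci_dev via dmar_parse_one_dev_scope().
 */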

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}


static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif
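
/*
 * Illustrative usage (editorial sketch): dmar_find_matched_atsr_unit()
 * walks from the device up through its parent buses to the PCIe root
 * port, then checks whether that root port appears in an ATSR device
 * scope (or whether the ATSR is INCLUDE_ALL for the segment).  A
 * hypothetical caller, with pdev a struct pci_dev *, would gate ATS
 * enablement on the return value:
 *
 *	if (dmar_find_matched_atsr_unit(pdev))
 *		... safe to enable ATS on this device's path ...
 */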

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, as the earlier dmar_tbl mapping could have used the
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
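
/*
 * Illustrative note (editorial): a DMAR table is a fixed
 * acpi_table_dmar header followed by a packed sequence of
 * variable-length remapping structures, each beginning with an
 * acpi_dmar_header that gives its type and length.  A hypothetical
 * table with one DRHD and one RMRR is laid out as:
 *
 *	[acpi_table_dmar][DRHD hdr|body......][RMRR hdr|body....]
 *	                  ^ advance by entry_header->length each step
 */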

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
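
/*
 * Illustrative usage (editorial sketch, pdev being a hypothetical
 * struct pci_dev *): a caller that needs the IOMMU covering a given
 * PCI device would typically do
 *
 *	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(pdev);
 *	struct intel_iommu *iommu = drhd ? drhd->iommu : NULL;
 *
 * Because INCLUDE_ALL units are registered at the tail of
 * dmar_drhd_units, an explicit device-scope match is always found
 * before the catch-all unit.
 */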

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}


int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
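
/*
 * Illustrative note (editorial): the invalidation queue is a fixed ring
 * of QI_LENGTH descriptors.  free_head is where new work is placed,
 * free_tail is the oldest still-outstanding slot, and reclaim only
 * advances the tail over contiguous QI_DONE/QI_ABORT entries, so slots
 * are recycled strictly in submission order.
 */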

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
				sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
				sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
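
/*
 * Illustrative note (editorial): each qi_submit_sync() call consumes
 * two ring slots, the caller's descriptor plus a wait descriptor whose
 * status write-back lands in qi->desc_status[wait_index].  After one
 * submission at index i the ring holds, conceptually:
 *
 *	hw[i]     = *desc;        the caller's invalidation request
 *	hw[i + 1] = wait desc;    completion marker, written back by HW
 *
 * qi_global_iec() below is the simplest real user of this interface.
 */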

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
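
/*
 * Illustrative note (editorial): for a non-zero mask the call above
 * invalidates 2^mask pages.  addr must be aligned to that range (the
 * BUG_ON), and the size is encoded ATS-style by saturating the low
 * address bits so the lowest zero bit marks the range boundary.  For
 * example, with VTD_PAGE_SHIFT == 12 and mask == 2 (four pages), a
 * 16K-aligned addr gets bits 0-12 set, leaving bit 13 as the lowest
 * zero bit.
 */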

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;


	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
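
/*
 * Illustrative usage (editorial sketch): a typical bring-up sequence
 * for one remapping unit would be
 *
 *	if (dmar_enable_qi(iommu) == 0)
 *		qi_global_iec(iommu);    (e.g. flush the IEC once QI is up)
 *
 * after which qi_flush_context()/qi_flush_iotlb() replace register
 * based invalidation.
 */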

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
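
/*
 * Illustrative note (editorial): each primary fault record is 16 bytes
 * (PRIMARY_FAULT_REG_LEN): the faulting page address in the qword at
 * offset 0, the source-id in the dword at offset 8, and the fault
 * reason, type and F bit in the dword at offset 12, which is why the
 * handler above reads offsets +12, +8 and +0 of each record in turn.
 */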

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}