dmar: support for parsing Remapping Hardware Static Affinity structure
[deliverable/linux.git] drivers/pci/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

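/*
 * A DMAR device scope entry is an acpi_dmar_device_scope header (entry
 * type, length, start bus number) followed by a variable number of
 * acpi_dmar_pci_path {device, function} pairs.  The helper below walks
 * that path hop by hop through subordinate buses to resolve the
 * struct pci_dev the firmware is describing.
 */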
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path ++;
		count --;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
	    pdev->subordinate) || (scope->entry_type == \
	    ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		pci_dev_put(pdev);
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void * tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index ++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

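/*
 * The structures below are only needed by the DMA-remapping side
 * (CONFIG_DMAR): RMRR entries describe memory regions the firmware has
 * reserved for device use, and ATSR entries list the PCIe root ports
 * below which devices may use Address Translation Services (device
 * IOTLBs); dmar_find_matched_atsr_unit() consults the latter.
 */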
#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}


static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

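/*
 * Remapping Hardware Static Affinity (RHSA) structures tie a remapping
 * unit (identified by its register base address) to an ACPI proximity
 * domain.  The parser below matches the RHSA against the DRHD list and
 * records the corresponding NUMA node in drhd->iommu->node, falling
 * back to -1 (no node) when the node is not online.
 */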
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd)
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}

	return -ENODEV;
}

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk (KERN_INFO PREFIX
			"DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk (KERN_INFO PREFIX
			"RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

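/*
 * The DMAR table is a fixed acpi_table_dmar header (host address width,
 * flags) followed by a sequence of variable-length remapping structures,
 * each starting with an acpi_dmar_header {type, length}.  parse_dmar_table()
 * below walks that sequence and dispatches on the type: DRHD, RMRR, ATSR
 * and, as of this change, RHSA (hardware affinity).
 */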
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have been made
	 * with the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the copy of
	 * the DMAR table that SINIT saved in SinitMleData in the TXT heap
	 * (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk (KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
			ret = dmar_parse_one_rhsa(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

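/*
 * detect_intel_iommu() runs very early, before the DMAR table is parsed
 * for real: it only checks that a DMAR table exists, decides whether the
 * IOMMU (and, with CONFIG_INTR_REMAP, queued invalidation for x2APIC)
 * should be considered present, and then drops the early ACPI mapping
 * again.  The table is re-detected later from parse_dmar_table().
 */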
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

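/*
 * alloc_iommu() gives the DRHD a struct intel_iommu: it maps the unit's
 * register window (growing the mapping if the capability registers say
 * the IOTLB/fault registers sit beyond the first page), caches the CAP
 * and ECAP registers, computes the supported address widths, and seeds
 * iommu->node with -1 so that a later RHSA entry can fill in the NUMA
 * affinity.
 */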

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf (iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     drhd->reg_base_addr,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

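/*
 * Queued invalidation state: qi->desc is a single page holding QI_LENGTH
 * descriptors used as a ring, and qi->desc_status tracks each slot
 * (QI_FREE/QI_IN_USE/QI_DONE/QI_ABORT).  Every submission consumes two
 * slots: the caller's descriptor plus a wait descriptor whose status
 * write tells qi_submit_sync() that the hardware has finished.
 */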
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
				sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
				sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

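/*
 * qi_submit_sync() spins (with the queue lock dropped) until the wait
 * descriptor is marked QI_DONE.  qi_check_fault() is polled on every
 * iteration; an Invalidation Queue Error invalidates this submission
 * (-EINVAL), while a Time-out Error aborts all pending waits and makes
 * the caller retry from the "restart" label (-EAGAIN).
 */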
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is to
		 * avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
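
/*
 * Enabling the queue is a fixed register sequence: reset the software
 * ring state, zero the hardware tail pointer (DMAR_IQT_REG), point
 * DMAR_IQA_REG at the descriptor page, set DMA_GCMD_QIE and wait for
 * the status register to report QIES.  dmar_reenable_qi() reuses this
 * helper once dmar_disable_qi() has quiesced the hardware.
 */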

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = 0;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = 0;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

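/*
 * Fault reasons 0x20 and up are interrupt-remapping faults; everything
 * below ARRAY_SIZE(dma_remap_fault_reasons) is a DMA-remapping fault.
 * The type is reported back through *fault_type so dmar_fault_do_one()
 * can pick the right log format.
 */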
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx \n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

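/*
 * Primary fault records are PRIMARY_FAULT_REG_LEN (16) bytes each,
 * starting at cap_fault_reg_offset().  The handler walks them from the
 * index reported in DMAR_FSTS_REG, decodes reason/type/source-id/address,
 * clears each record's F bit, and wraps around at cap_num_fault_regs().
 */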
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	return dmar->flags & 0x1;
}