iommu/vt-d: Factor out dmar_alloc_dev_scope() for later reuse
drivers/iommu/dmar.c ([deliverable/linux.git])
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 *
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/iommu_table.h>
43
44 #include "irq_remapping.h"
45
46 /* No locks are needed, as the DMA remapping hardware unit
47 * list is constructed at boot time and hotplug of
48 * these units is not supported by the architecture.
49 */
50 LIST_HEAD(dmar_drhd_units);
51
52 struct acpi_table_header * __initdata dmar_tbl;
53 static acpi_size dmar_tbl_size;
54
55 static int alloc_iommu(struct dmar_drhd_unit *drhd);
56 static void free_iommu(struct intel_iommu *iommu);
57
58 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
59 {
60 /*
61 * add INCLUDE_ALL at the tail, so a scan of the list will find it at
62 * the very end.
63 */
64 if (drhd->include_all)
65 list_add_tail(&drhd->list, &dmar_drhd_units);
66 else
67 list_add(&drhd->list, &dmar_drhd_units);
68 }
69
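/*
 * Walk the PCI path entries of one ACPI device-scope structure and
 * resolve them to a pci_dev, starting from the scope's start bus on
 * the given segment. On success a reference to the device is taken
 * and returned through @dev.
 */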
70 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
71 struct pci_dev **dev, u16 segment)
72 {
73 struct pci_bus *bus;
74 struct pci_dev *pdev = NULL;
75 struct acpi_dmar_pci_path *path;
76 int count;
77
78 bus = pci_find_bus(segment, scope->bus);
79 path = (struct acpi_dmar_pci_path *)(scope + 1);
80 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
81 / sizeof(struct acpi_dmar_pci_path);
82
83 while (count) {
84 if (pdev)
85 pci_dev_put(pdev);
86 /*
87 * Some BIOSes list non-existent devices in the DMAR table;
88 * just ignore them.
89 */
90 if (!bus) {
91 pr_warn("Device scope bus [%d] not found\n", scope->bus);
92 break;
93 }
94 pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
95 if (!pdev) {
96 /* warning will be printed below */
97 break;
98 }
99 path++;
100 count--;
101 bus = pdev->subordinate;
102 }
103 if (!pdev) {
104 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
105 segment, scope->bus, path->device, path->function);
106 return 0;
107 }
108 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
109 pdev->subordinate) || (scope->entry_type ==
110 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
111 pr_warn("Device scope type does not match for %s\n",
112 pci_name(pdev));
113 pci_dev_put(pdev);
114 return -EINVAL;
115 }
116 *dev = pdev;
117 return 0;
118 }
119
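/*
 * dmar_alloc_dev_scope - count the endpoint and bridge entries in a
 * device-scope range and allocate a pci_dev pointer array sized to
 * hold them. IOAPIC and HPET entries are silently skipped; any other
 * entry type triggers a warning. Returns NULL when no entries are found.
 */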
120 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
121 {
122 struct acpi_dmar_device_scope *scope;
123
124 *cnt = 0;
125 while (start < end) {
126 scope = start;
127 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
128 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
129 (*cnt)++;
130 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
131 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
132 pr_warn("Unsupported device scope\n");
133 }
134 start += scope->length;
135 }
136 if (*cnt == 0)
137 return NULL;
138
139 return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
140 }
141
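/*
 * dmar_parse_dev_scope - allocate the device array via
 * dmar_alloc_dev_scope() and fill it by resolving each endpoint or
 * bridge entry to a pci_dev on the given segment. On any resolution
 * error the array is freed again and the error is returned.
 */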
142 int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
143 struct pci_dev ***devices, u16 segment)
144 {
145 struct acpi_dmar_device_scope *scope;
146 int index, ret;
147
148 *devices = dmar_alloc_dev_scope(start, end, cnt);
149 if (*cnt == 0)
150 return 0;
151 else if (!*devices)
152 return -ENOMEM;
153
154 for (index = 0; start < end; start += scope->length) {
155 scope = start;
156 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
157 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
158 ret = dmar_parse_one_dev_scope(scope,
159 &(*devices)[index], segment);
160 if (ret) {
161 dmar_free_dev_scope(devices, cnt);
162 return ret;
163 }
164 index++;
165 }
166 }
167
168 return 0;
169 }
170
171 void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
172 {
173 if (*devices && *cnt) {
174 while (--*cnt >= 0)
175 pci_dev_put((*devices)[*cnt]);
176 kfree(*devices);
177 *devices = NULL;
178 *cnt = 0;
179 }
180 }
181
182 /**
183 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
184 * structure which uniquely represents one DMA remapping hardware unit
185 * present in the platform
186 */
187 static int __init
188 dmar_parse_one_drhd(struct acpi_dmar_header *header)
189 {
190 struct acpi_dmar_hardware_unit *drhd;
191 struct dmar_drhd_unit *dmaru;
192 int ret = 0;
193
194 drhd = (struct acpi_dmar_hardware_unit *)header;
195 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
196 if (!dmaru)
197 return -ENOMEM;
198
199 dmaru->hdr = header;
200 dmaru->reg_base_addr = drhd->address;
201 dmaru->segment = drhd->segment;
202 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
203
204 ret = alloc_iommu(dmaru);
205 if (ret) {
206 kfree(dmaru);
207 return ret;
208 }
209 dmar_register_drhd_unit(dmaru);
210 return 0;
211 }
212
213 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
214 {
215 if (dmaru->devices && dmaru->devices_cnt)
216 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
217 if (dmaru->iommu)
218 free_iommu(dmaru->iommu);
219 kfree(dmaru);
220 }
221
222 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
223 {
224 struct acpi_dmar_hardware_unit *drhd;
225
226 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
227
228 if (dmaru->include_all)
229 return 0;
230
231 return dmar_parse_dev_scope((void *)(drhd + 1),
232 ((void *)drhd) + drhd->header.length,
233 &dmaru->devices_cnt, &dmaru->devices,
234 drhd->segment);
235 }
236
237 #ifdef CONFIG_ACPI_NUMA
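/*
 * An RHSA structure ties a DRHD register base to an ACPI proximity
 * domain; record the resulting NUMA node on the matching IOMMU so its
 * data structures can be allocated node-locally.
 */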
238 static int __init
239 dmar_parse_one_rhsa(struct acpi_dmar_header *header)
240 {
241 struct acpi_dmar_rhsa *rhsa;
242 struct dmar_drhd_unit *drhd;
243
244 rhsa = (struct acpi_dmar_rhsa *)header;
245 for_each_drhd_unit(drhd) {
246 if (drhd->reg_base_addr == rhsa->base_address) {
247 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
248
249 if (!node_online(node))
250 node = -1;
251 drhd->iommu->node = node;
252 return 0;
253 }
254 }
255 WARN_TAINT(
256 1, TAINT_FIRMWARE_WORKAROUND,
257 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
258 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
259 rhsa->base_address,
260 dmi_get_system_info(DMI_BIOS_VENDOR),
261 dmi_get_system_info(DMI_BIOS_VERSION),
262 dmi_get_system_info(DMI_PRODUCT_VERSION));
263
264 return 0;
265 }
266 #endif
267
268 static void __init
269 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
270 {
271 struct acpi_dmar_hardware_unit *drhd;
272 struct acpi_dmar_reserved_memory *rmrr;
273 struct acpi_dmar_atsr *atsr;
274 struct acpi_dmar_rhsa *rhsa;
275
276 switch (header->type) {
277 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
278 drhd = container_of(header, struct acpi_dmar_hardware_unit,
279 header);
280 pr_info("DRHD base: %#016Lx flags: %#x\n",
281 (unsigned long long)drhd->address, drhd->flags);
282 break;
283 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
284 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
285 header);
286 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
287 (unsigned long long)rmrr->base_address,
288 (unsigned long long)rmrr->end_address);
289 break;
290 case ACPI_DMAR_TYPE_ATSR:
291 atsr = container_of(header, struct acpi_dmar_atsr, header);
292 pr_info("ATSR flags: %#x\n", atsr->flags);
293 break;
294 case ACPI_DMAR_HARDWARE_AFFINITY:
295 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
296 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
297 (unsigned long long)rhsa->base_address,
298 rhsa->proximity_domain);
299 break;
300 }
301 }
302
303 /**
304 * dmar_table_detect - checks to see if the platform supports DMAR devices
305 */
306 static int __init dmar_table_detect(void)
307 {
308 acpi_status status = AE_OK;
309
310 /* if we can find the DMAR table, then there are DMAR devices */
311 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
312 (struct acpi_table_header **)&dmar_tbl,
313 &dmar_tbl_size);
314
315 if (ACPI_SUCCESS(status) && !dmar_tbl) {
316 pr_warn("Unable to map DMAR\n");
317 status = AE_NOT_FOUND;
318 }
319
320 return (ACPI_SUCCESS(status) ? 1 : 0);
321 }
322
323 /**
324 * parse_dmar_table - parses the DMA reporting table
325 */
326 static int __init
327 parse_dmar_table(void)
328 {
329 struct acpi_table_dmar *dmar;
330 struct acpi_dmar_header *entry_header;
331 int ret = 0;
332 int drhd_count = 0;
333
334 /*
335 * Do it again, as the earlier dmar_tbl mapping could have been made
336 * with the early fixed map.
337 */
338 dmar_table_detect();
339
340 /*
341 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
342 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
343 */
344 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
345
346 dmar = (struct acpi_table_dmar *)dmar_tbl;
347 if (!dmar)
348 return -ENODEV;
349
350 if (dmar->width < PAGE_SHIFT - 1) {
351 pr_warn("Invalid DMAR haw\n");
352 return -EINVAL;
353 }
354
355 pr_info("Host address width %d\n", dmar->width + 1);
356
357 entry_header = (struct acpi_dmar_header *)(dmar + 1);
358 while (((unsigned long)entry_header) <
359 (((unsigned long)dmar) + dmar_tbl->length)) {
360 /* Avoid looping forever on bad ACPI tables */
361 if (entry_header->length == 0) {
362 pr_warn("Invalid 0-length structure\n");
363 ret = -EINVAL;
364 break;
365 }
366
367 dmar_table_print_dmar_entry(entry_header);
368
369 switch (entry_header->type) {
370 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
371 drhd_count++;
372 ret = dmar_parse_one_drhd(entry_header);
373 break;
374 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
375 ret = dmar_parse_one_rmrr(entry_header);
376 break;
377 case ACPI_DMAR_TYPE_ATSR:
378 ret = dmar_parse_one_atsr(entry_header);
379 break;
380 case ACPI_DMAR_HARDWARE_AFFINITY:
381 #ifdef CONFIG_ACPI_NUMA
382 ret = dmar_parse_one_rhsa(entry_header);
383 #endif
384 break;
385 default:
386 pr_warn("Unknown DMAR structure type %d\n",
387 entry_header->type);
388 ret = 0; /* for forward compatibility */
389 break;
390 }
391 if (ret)
392 break;
393
394 entry_header = ((void *)entry_header + entry_header->length);
395 }
396 if (drhd_count == 0)
397 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
398 return ret;
399 }
400
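/* Return 1 if @dev or any of its upstream bridges is in @devices[]. */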
401 static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
402 struct pci_dev *dev)
403 {
404 int index;
405
406 while (dev) {
407 for (index = 0; index < cnt; index++)
408 if (dev == devices[index])
409 return 1;
410
411 /* Check our parent */
412 dev = dev->bus->self;
413 }
414
415 return 0;
416 }
417
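/*
 * Find the DRHD unit whose device scope covers @dev (or one of its
 * parent bridges), falling back to an INCLUDE_ALL unit on the same
 * PCI segment. For SR-IOV virtual functions the physical function is
 * used for the lookup.
 */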
418 struct dmar_drhd_unit *
419 dmar_find_matched_drhd_unit(struct pci_dev *dev)
420 {
421 struct dmar_drhd_unit *dmaru = NULL;
422 struct acpi_dmar_hardware_unit *drhd;
423
424 dev = pci_physfn(dev);
425
426 for_each_drhd_unit(dmaru) {
427 drhd = container_of(dmaru->hdr,
428 struct acpi_dmar_hardware_unit,
429 header);
430
431 if (dmaru->include_all &&
432 drhd->segment == pci_domain_nr(dev->bus))
433 return dmaru;
434
435 if (dmar_pci_device_match(dmaru->devices,
436 dmaru->devices_cnt, dev))
437 return dmaru;
438 }
439
440 return NULL;
441 }
442
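/*
 * Resolve the device scopes of all DRHD, RMRR and ATSR structures once
 * PCI enumeration is available. The result is cached, so repeated calls
 * return the outcome of the first one.
 */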
443 int __init dmar_dev_scope_init(void)
444 {
445 static int dmar_dev_scope_initialized;
446 struct dmar_drhd_unit *drhd;
447 int ret = -ENODEV;
448
449 if (dmar_dev_scope_initialized)
450 return dmar_dev_scope_initialized;
451
452 if (list_empty(&dmar_drhd_units))
453 goto fail;
454
455 list_for_each_entry(drhd, &dmar_drhd_units, list) {
456 ret = dmar_parse_dev(drhd);
457 if (ret)
458 goto fail;
459 }
460
461 ret = dmar_parse_rmrr_atsr_dev();
462 if (ret)
463 goto fail;
464
465 dmar_dev_scope_initialized = 1;
466 return 0;
467
468 fail:
469 dmar_dev_scope_initialized = ret;
470 return ret;
471 }
472
473
474 int __init dmar_table_init(void)
475 {
476 static int dmar_table_initialized;
477 int ret;
478
479 if (dmar_table_initialized == 0) {
480 ret = parse_dmar_table();
481 if (ret < 0) {
482 if (ret != -ENODEV)
483 pr_info("Parsing DMAR table failed.\n");
484 } else if (list_empty(&dmar_drhd_units)) {
485 pr_info("No DMAR devices found\n");
486 ret = -ENODEV;
487 }
488
489 if (ret < 0)
490 dmar_table_initialized = ret;
491 else
492 dmar_table_initialized = 1;
493 }
494
495 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
496 }
497
498 static void warn_invalid_dmar(u64 addr, const char *message)
499 {
500 WARN_TAINT_ONCE(
501 1, TAINT_FIRMWARE_WORKAROUND,
502 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
503 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
504 addr, message,
505 dmi_get_system_info(DMI_BIOS_VENDOR),
506 dmi_get_system_info(DMI_BIOS_VERSION),
507 dmi_get_system_info(DMI_PRODUCT_VERSION));
508 }
509
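/*
 * Sanity-check every DRHD in the table: a zero register base address,
 * or capability registers that read back as all ones, indicates broken
 * firmware. Returns 1 if all units look usable, 0 otherwise.
 */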
510 static int __init check_zero_address(void)
511 {
512 struct acpi_table_dmar *dmar;
513 struct acpi_dmar_header *entry_header;
514 struct acpi_dmar_hardware_unit *drhd;
515
516 dmar = (struct acpi_table_dmar *)dmar_tbl;
517 entry_header = (struct acpi_dmar_header *)(dmar + 1);
518
519 while (((unsigned long)entry_header) <
520 (((unsigned long)dmar) + dmar_tbl->length)) {
521 /* Avoid looping forever on bad ACPI tables */
522 if (entry_header->length == 0) {
523 pr_warn("Invalid 0-length structure\n");
524 return 0;
525 }
526
527 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
528 void __iomem *addr;
529 u64 cap, ecap;
530
531 drhd = (void *)entry_header;
532 if (!drhd->address) {
533 warn_invalid_dmar(0, "");
534 goto failed;
535 }
536
537 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
538 if (!addr) {
539 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
540 goto failed;
541 }
542 cap = dmar_readq(addr + DMAR_CAP_REG);
543 ecap = dmar_readq(addr + DMAR_ECAP_REG);
544 early_iounmap(addr, VTD_PAGE_SIZE);
545 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
546 warn_invalid_dmar(drhd->address,
547 " returns all ones");
548 goto failed;
549 }
550 }
551
552 entry_header = ((void *)entry_header + entry_header->length);
553 }
554 return 1;
555
556 failed:
557 return 0;
558 }
559
560 int __init detect_intel_iommu(void)
561 {
562 int ret;
563
564 ret = dmar_table_detect();
565 if (ret)
566 ret = check_zero_address();
567 {
568 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
569 iommu_detected = 1;
570 /* Make sure ACS will be enabled */
571 pci_request_acs();
572 }
573
574 #ifdef CONFIG_X86
575 if (ret)
576 x86_init.iommu.iommu_init = intel_iommu_init;
577 #endif
578 }
579 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
580 dmar_tbl = NULL;
581
582 return ret ? 1 : -ENODEV;
583 }
584
585
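/* Undo map_iommu(): release the register mapping and the MMIO resource. */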
586 static void unmap_iommu(struct intel_iommu *iommu)
587 {
588 iounmap(iommu->reg);
589 release_mem_region(iommu->reg_phys, iommu->reg_size);
590 }
591
592 /**
593 * map_iommu: map the iommu's registers
594 * @iommu: the iommu to map
595 * @phys_addr: the physical address of the base register
596 *
597 * Memory map the iommu's registers. Start with a single page, and
598 * possibly expand if that turns out to be insufficient.
599 */
600 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
601 {
602 int map_size, err = 0;
603
604 iommu->reg_phys = phys_addr;
605 iommu->reg_size = VTD_PAGE_SIZE;
606
607 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
608 pr_err("IOMMU: can't reserve memory\n");
609 err = -EBUSY;
610 goto out;
611 }
612
613 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
614 if (!iommu->reg) {
615 pr_err("IOMMU: can't map the region\n");
616 err = -ENOMEM;
617 goto release;
618 }
619
620 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
621 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
622
623 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
624 err = -EINVAL;
625 warn_invalid_dmar(phys_addr, " returns all ones");
626 goto unmap;
627 }
628
629 /* the registers might be more than one page */
630 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
631 cap_max_fault_reg_offset(iommu->cap));
632 map_size = VTD_PAGE_ALIGN(map_size);
633 if (map_size > iommu->reg_size) {
634 iounmap(iommu->reg);
635 release_mem_region(iommu->reg_phys, iommu->reg_size);
636 iommu->reg_size = map_size;
637 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
638 iommu->name)) {
639 pr_err("IOMMU: can't reserve memory\n");
640 err = -EBUSY;
641 goto out;
642 }
643 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
644 if (!iommu->reg) {
645 pr_err("IOMMU: can't map the region\n");
646 err = -ENOMEM;
647 goto release;
648 }
649 }
650 err = 0;
651 goto out;
652
653 unmap:
654 iounmap(iommu->reg);
655 release:
656 release_mem_region(iommu->reg_phys, iommu->reg_size);
657 out:
658 return err;
659 }
660
661 static int alloc_iommu(struct dmar_drhd_unit *drhd)
662 {
663 struct intel_iommu *iommu;
664 u32 ver, sts;
665 static int iommu_allocated = 0;
666 int agaw = 0;
667 int msagaw = 0;
668 int err;
669
670 if (!drhd->reg_base_addr) {
671 warn_invalid_dmar(0, "");
672 return -EINVAL;
673 }
674
675 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
676 if (!iommu)
677 return -ENOMEM;
678
679 iommu->seq_id = iommu_allocated++;
680 sprintf(iommu->name, "dmar%d", iommu->seq_id);
681
682 err = map_iommu(iommu, drhd->reg_base_addr);
683 if (err) {
684 pr_err("IOMMU: failed to map %s\n", iommu->name);
685 goto error;
686 }
687
688 err = -EINVAL;
689 agaw = iommu_calculate_agaw(iommu);
690 if (agaw < 0) {
691 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
692 iommu->seq_id);
693 goto err_unmap;
694 }
695 msagaw = iommu_calculate_max_sagaw(iommu);
696 if (msagaw < 0) {
697 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
698 iommu->seq_id);
699 goto err_unmap;
700 }
701 iommu->agaw = agaw;
702 iommu->msagaw = msagaw;
703
704 iommu->node = -1;
705
706 ver = readl(iommu->reg + DMAR_VER_REG);
707 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
708 iommu->seq_id,
709 (unsigned long long)drhd->reg_base_addr,
710 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
711 (unsigned long long)iommu->cap,
712 (unsigned long long)iommu->ecap);
713
714 /* Reflect status in gcmd */
715 sts = readl(iommu->reg + DMAR_GSTS_REG);
716 if (sts & DMA_GSTS_IRES)
717 iommu->gcmd |= DMA_GCMD_IRE;
718 if (sts & DMA_GSTS_TES)
719 iommu->gcmd |= DMA_GCMD_TE;
720 if (sts & DMA_GSTS_QIES)
721 iommu->gcmd |= DMA_GCMD_QIE;
722
723 raw_spin_lock_init(&iommu->register_lock);
724
725 drhd->iommu = iommu;
726 return 0;
727
728 err_unmap:
729 unmap_iommu(iommu);
730 error:
731 kfree(iommu);
732 return err;
733 }
734
735 static void free_iommu(struct intel_iommu *iommu)
736 {
737 if (iommu->irq) {
738 free_irq(iommu->irq, iommu);
739 irq_set_handler_data(iommu->irq, NULL);
740 destroy_irq(iommu->irq);
741 }
742
743 if (iommu->qi) {
744 free_page((unsigned long)iommu->qi->desc);
745 kfree(iommu->qi->desc_status);
746 kfree(iommu->qi);
747 }
748
749 if (iommu->reg)
750 unmap_iommu(iommu);
751
752 kfree(iommu);
753 }
754
755 /*
756 * Reclaim all the submitted descriptors which have completed their work.
757 */
758 static inline void reclaim_free_desc(struct q_inval *qi)
759 {
760 while (qi->desc_status[qi->free_tail] == QI_DONE ||
761 qi->desc_status[qi->free_tail] == QI_ABORT) {
762 qi->desc_status[qi->free_tail] = QI_FREE;
763 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
764 qi->free_cnt++;
765 }
766 }
767
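/*
 * Check the fault status register for queued-invalidation errors and
 * try to recover: an IQE on our descriptor is handled by replacing it
 * with the wait descriptor, an ITE aborts the affected wait
 * descriptors, and an ICE is simply cleared. Returns -EAGAIN if our
 * wait descriptor was aborted, -EINVAL for an invalid descriptor,
 * 0 otherwise.
 */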
768 static int qi_check_fault(struct intel_iommu *iommu, int index)
769 {
770 u32 fault;
771 int head, tail;
772 struct q_inval *qi = iommu->qi;
773 int wait_index = (index + 1) % QI_LENGTH;
774
775 if (qi->desc_status[wait_index] == QI_ABORT)
776 return -EAGAIN;
777
778 fault = readl(iommu->reg + DMAR_FSTS_REG);
779
780 /*
781 * If IQE happens, the head points to the descriptor associated
782 * with the error. No new descriptors are fetched until the IQE
783 * is cleared.
784 */
785 if (fault & DMA_FSTS_IQE) {
786 head = readl(iommu->reg + DMAR_IQH_REG);
787 if ((head >> DMAR_IQ_SHIFT) == index) {
788 pr_err("VT-d detected invalid descriptor: "
789 "low=%llx, high=%llx\n",
790 (unsigned long long)qi->desc[index].low,
791 (unsigned long long)qi->desc[index].high);
792 memcpy(&qi->desc[index], &qi->desc[wait_index],
793 sizeof(struct qi_desc));
794 __iommu_flush_cache(iommu, &qi->desc[index],
795 sizeof(struct qi_desc));
796 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
797 return -EINVAL;
798 }
799 }
800
801 /*
802 * If ITE happens, all pending wait_desc commands are aborted.
803 * No new descriptors are fetched until the ITE is cleared.
804 */
805 if (fault & DMA_FSTS_ITE) {
806 head = readl(iommu->reg + DMAR_IQH_REG);
807 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
808 head |= 1;
809 tail = readl(iommu->reg + DMAR_IQT_REG);
810 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
811
812 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
813
814 do {
815 if (qi->desc_status[head] == QI_IN_USE)
816 qi->desc_status[head] = QI_ABORT;
817 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
818 } while (head != tail);
819
820 if (qi->desc_status[wait_index] == QI_ABORT)
821 return -EAGAIN;
822 }
823
824 if (fault & DMA_FSTS_ICE)
825 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
826
827 return 0;
828 }
829
830 /*
831 * Submit the queued invalidation descriptor to the remapping
832 * hardware unit and wait for its completion.
833 */
834 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
835 {
836 int rc;
837 struct q_inval *qi = iommu->qi;
838 struct qi_desc *hw, wait_desc;
839 int wait_index, index;
840 unsigned long flags;
841
842 if (!qi)
843 return 0;
844
845 hw = qi->desc;
846
847 restart:
848 rc = 0;
849
850 raw_spin_lock_irqsave(&qi->q_lock, flags);
851 while (qi->free_cnt < 3) {
852 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
853 cpu_relax();
854 raw_spin_lock_irqsave(&qi->q_lock, flags);
855 }
856
857 index = qi->free_head;
858 wait_index = (index + 1) % QI_LENGTH;
859
860 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
861
862 hw[index] = *desc;
863
864 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
865 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
866 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
867
868 hw[wait_index] = wait_desc;
869
870 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
871 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
872
873 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
874 qi->free_cnt -= 2;
875
876 /*
877 * update the HW tail register indicating the presence of
878 * new descriptors.
879 */
880 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
881
882 while (qi->desc_status[wait_index] != QI_DONE) {
883 /*
884 * We leave interrupts disabled to prevent interrupt context
885 * from queueing another command while one is already submitted
886 * and waiting for completion on this CPU. This avoids a
887 * deadlock where the interrupt context could wait indefinitely
888 * for free slots in the queue.
889 */
890 rc = qi_check_fault(iommu, index);
891 if (rc)
892 break;
893
894 raw_spin_unlock(&qi->q_lock);
895 cpu_relax();
896 raw_spin_lock(&qi->q_lock);
897 }
898
899 qi->desc_status[index] = QI_DONE;
900
901 reclaim_free_desc(qi);
902 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
903
904 if (rc == -EAGAIN)
905 goto restart;
906
907 return rc;
908 }
909
910 /*
911 * Flush the global interrupt entry cache.
912 */
913 void qi_global_iec(struct intel_iommu *iommu)
914 {
915 struct qi_desc desc;
916
917 desc.low = QI_IEC_TYPE;
918 desc.high = 0;
919
920 /* should never fail */
921 qi_submit_sync(&desc, iommu);
922 }
923
924 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
925 u64 type)
926 {
927 struct qi_desc desc;
928
929 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
930 | QI_CC_GRAN(type) | QI_CC_TYPE;
931 desc.high = 0;
932
933 qi_submit_sync(&desc, iommu);
934 }
935
936 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
937 unsigned int size_order, u64 type)
938 {
939 u8 dw = 0, dr = 0;
940
941 struct qi_desc desc;
942 int ih = 0;
943
944 if (cap_write_drain(iommu->cap))
945 dw = 1;
946
947 if (cap_read_drain(iommu->cap))
948 dr = 1;
949
950 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
951 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
952 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
953 | QI_IOTLB_AM(size_order);
954
955 qi_submit_sync(&desc, iommu);
956 }
957
958 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
959 u64 addr, unsigned mask)
960 {
961 struct qi_desc desc;
962
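/*
 * For a ranged invalidation, encode the number of pages (2^mask) in
 * the low bits of the address and set the size bit, as the
 * device-IOTLB descriptor format requires.
 */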
963 if (mask) {
964 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
965 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
966 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
967 } else
968 desc.high = QI_DEV_IOTLB_ADDR(addr);
969
970 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
971 qdep = 0;
972
973 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
974 QI_DIOTLB_TYPE;
975
976 qi_submit_sync(&desc, iommu);
977 }
978
979 /*
980 * Disable Queued Invalidation interface.
981 */
982 void dmar_disable_qi(struct intel_iommu *iommu)
983 {
984 unsigned long flags;
985 u32 sts;
986 cycles_t start_time = get_cycles();
987
988 if (!ecap_qis(iommu->ecap))
989 return;
990
991 raw_spin_lock_irqsave(&iommu->register_lock, flags);
992
993 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
994 if (!(sts & DMA_GSTS_QIES))
995 goto end;
996
997 /*
998 * Give a chance to HW to complete the pending invalidation requests.
999 */
1000 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1001 readl(iommu->reg + DMAR_IQH_REG)) &&
1002 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1003 cpu_relax();
1004
1005 iommu->gcmd &= ~DMA_GCMD_QIE;
1006 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1007
1008 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1009 !(sts & DMA_GSTS_QIES), sts);
1010 end:
1011 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1012 }
1013
1014 /*
1015 * Enable queued invalidation.
1016 */
1017 static void __dmar_enable_qi(struct intel_iommu *iommu)
1018 {
1019 u32 sts;
1020 unsigned long flags;
1021 struct q_inval *qi = iommu->qi;
1022
1023 qi->free_head = qi->free_tail = 0;
1024 qi->free_cnt = QI_LENGTH;
1025
1026 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1027
1028 /* write zero to the tail reg */
1029 writel(0, iommu->reg + DMAR_IQT_REG);
1030
1031 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1032
1033 iommu->gcmd |= DMA_GCMD_QIE;
1034 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1035
1036 /* Make sure hardware complete it */
1037 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1038
1039 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1040 }
1041
1042 /*
1043 * Enable Queued Invalidation interface. This is a must to support
1044 * interrupt-remapping. Also used by DMA-remapping, which replaces
1045 * register based IOTLB invalidation.
1046 */
1047 int dmar_enable_qi(struct intel_iommu *iommu)
1048 {
1049 struct q_inval *qi;
1050 struct page *desc_page;
1051
1052 if (!ecap_qis(iommu->ecap))
1053 return -ENOENT;
1054
1055 /*
1056 * queued invalidation is already set up and enabled.
1057 */
1058 if (iommu->qi)
1059 return 0;
1060
1061 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1062 if (!iommu->qi)
1063 return -ENOMEM;
1064
1065 qi = iommu->qi;
1066
1067
1068 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1069 if (!desc_page) {
1070 kfree(qi);
1071 iommu->qi = NULL;
1072 return -ENOMEM;
1073 }
1074
1075 qi->desc = page_address(desc_page);
1076
1077 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1078 if (!qi->desc_status) {
1079 free_page((unsigned long) qi->desc);
1080 kfree(qi);
1081 iommu->qi = NULL;
1082 return -ENOMEM;
1083 }
1084
1085 qi->free_head = qi->free_tail = 0;
1086 qi->free_cnt = QI_LENGTH;
1087
1088 raw_spin_lock_init(&qi->q_lock);
1089
1090 __dmar_enable_qi(iommu);
1091
1092 return 0;
1093 }
1094
1095 /* iommu interrupt handling. Most stuff are MSI-like. */
1096
1097 enum faulttype {
1098 DMA_REMAP,
1099 INTR_REMAP,
1100 UNKNOWN,
1101 };
1102
1103 static const char *dma_remap_fault_reasons[] =
1104 {
1105 "Software",
1106 "Present bit in root entry is clear",
1107 "Present bit in context entry is clear",
1108 "Invalid context entry",
1109 "Access beyond MGAW",
1110 "PTE Write access is not set",
1111 "PTE Read access is not set",
1112 "Next page table ptr is invalid",
1113 "Root table address invalid",
1114 "Context table ptr is invalid",
1115 "non-zero reserved fields in RTP",
1116 "non-zero reserved fields in CTP",
1117 "non-zero reserved fields in PTE",
1118 "PCE for translation request specifies blocking",
1119 };
1120
1121 static const char *irq_remap_fault_reasons[] =
1122 {
1123 "Detected reserved fields in the decoded interrupt-remapped request",
1124 "Interrupt index exceeded the interrupt-remapping table size",
1125 "Present field in the IRTE entry is clear",
1126 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1127 "Detected reserved fields in the IRTE entry",
1128 "Blocked a compatibility format interrupt request",
1129 "Blocked an interrupt request due to source-id verification failure",
1130 };
1131
1132 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1133 {
1134 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1135 ARRAY_SIZE(irq_remap_fault_reasons))) {
1136 *fault_type = INTR_REMAP;
1137 return irq_remap_fault_reasons[fault_reason - 0x20];
1138 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1139 *fault_type = DMA_REMAP;
1140 return dma_remap_fault_reasons[fault_reason];
1141 } else {
1142 *fault_type = UNKNOWN;
1143 return "Unknown";
1144 }
1145 }
1146
1147 void dmar_msi_unmask(struct irq_data *data)
1148 {
1149 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1150 unsigned long flag;
1151
1152 /* unmask it */
1153 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1154 writel(0, iommu->reg + DMAR_FECTL_REG);
1155 /* Read a reg to force flush the post write */
1156 readl(iommu->reg + DMAR_FECTL_REG);
1157 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1158 }
1159
1160 void dmar_msi_mask(struct irq_data *data)
1161 {
1162 unsigned long flag;
1163 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1164
1165 /* mask it */
1166 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1167 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1168 /* Read a reg to force flush the post write */
1169 readl(iommu->reg + DMAR_FECTL_REG);
1170 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1171 }
1172
1173 void dmar_msi_write(int irq, struct msi_msg *msg)
1174 {
1175 struct intel_iommu *iommu = irq_get_handler_data(irq);
1176 unsigned long flag;
1177
1178 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1179 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1180 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1181 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1182 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1183 }
1184
1185 void dmar_msi_read(int irq, struct msi_msg *msg)
1186 {
1187 struct intel_iommu *iommu = irq_get_handler_data(irq);
1188 unsigned long flag;
1189
1190 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1191 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1192 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1193 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1194 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1195 }
1196
1197 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1198 u8 fault_reason, u16 source_id, unsigned long long addr)
1199 {
1200 const char *reason;
1201 int fault_type;
1202
1203 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1204
1205 if (fault_type == INTR_REMAP)
1206 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
1207 "fault index %llx\n"
1208 "INTR-REMAP:[fault reason %02d] %s\n",
1209 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1210 PCI_FUNC(source_id & 0xFF), addr >> 48,
1211 fault_reason, reason);
1212 else
1213 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1214 "fault addr %llx\n"
1215 "DMAR:[fault reason %02d] %s\n",
1216 (type ? "DMA Read" : "DMA Write"),
1217 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1218 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1219 return 0;
1220 }
1221
1222 #define PRIMARY_FAULT_REG_LEN (16)
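/*
 * Primary fault interrupt handler: walk the fault recording registers
 * starting at the index from the fault status register, log and clear
 * each pending fault, then acknowledge the overflow/pending bits in FSTS.
 */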
1223 irqreturn_t dmar_fault(int irq, void *dev_id)
1224 {
1225 struct intel_iommu *iommu = dev_id;
1226 int reg, fault_index;
1227 u32 fault_status;
1228 unsigned long flag;
1229
1230 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1231 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1232 if (fault_status)
1233 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1234
1235 /* TBD: ignore advanced fault log currently */
1236 if (!(fault_status & DMA_FSTS_PPF))
1237 goto unlock_exit;
1238
1239 fault_index = dma_fsts_fault_record_index(fault_status);
1240 reg = cap_fault_reg_offset(iommu->cap);
1241 while (1) {
1242 u8 fault_reason;
1243 u16 source_id;
1244 u64 guest_addr;
1245 int type;
1246 u32 data;
1247
1248 /* highest 32 bits */
1249 data = readl(iommu->reg + reg +
1250 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1251 if (!(data & DMA_FRCD_F))
1252 break;
1253
1254 fault_reason = dma_frcd_fault_reason(data);
1255 type = dma_frcd_type(data);
1256
1257 data = readl(iommu->reg + reg +
1258 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1259 source_id = dma_frcd_source_id(data);
1260
1261 guest_addr = dmar_readq(iommu->reg + reg +
1262 fault_index * PRIMARY_FAULT_REG_LEN);
1263 guest_addr = dma_frcd_page_addr(guest_addr);
1264 /* clear the fault */
1265 writel(DMA_FRCD_F, iommu->reg + reg +
1266 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1267
1268 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1269
1270 dmar_fault_do_one(iommu, type, fault_reason,
1271 source_id, guest_addr);
1272
1273 fault_index++;
1274 if (fault_index >= cap_num_fault_regs(iommu->cap))
1275 fault_index = 0;
1276 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1277 }
1278
1279 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1280
1281 unlock_exit:
1282 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1283 return IRQ_HANDLED;
1284 }
1285
1286 int dmar_set_interrupt(struct intel_iommu *iommu)
1287 {
1288 int irq, ret;
1289
1290 /*
1291 * Check if the fault interrupt is already initialized.
1292 */
1293 if (iommu->irq)
1294 return 0;
1295
1296 irq = create_irq();
1297 if (!irq) {
1298 pr_err("IOMMU: no free vectors\n");
1299 return -EINVAL;
1300 }
1301
1302 irq_set_handler_data(irq, iommu);
1303 iommu->irq = irq;
1304
1305 ret = arch_setup_dmar_msi(irq);
1306 if (ret) {
1307 irq_set_handler_data(irq, NULL);
1308 iommu->irq = 0;
1309 destroy_irq(irq);
1310 return ret;
1311 }
1312
1313 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1314 if (ret)
1315 pr_err("IOMMU: can't request irq\n");
1316 return ret;
1317 }
1318
1319 int __init enable_drhd_fault_handling(void)
1320 {
1321 struct dmar_drhd_unit *drhd;
1322 struct intel_iommu *iommu;
1323
1324 /*
1325 * Enable fault control interrupt.
1326 */
1327 for_each_iommu(iommu, drhd) {
1328 u32 fault_status;
1329 int ret = dmar_set_interrupt(iommu);
1330
1331 if (ret) {
1332 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1333 (unsigned long long)drhd->reg_base_addr, ret);
1334 return -1;
1335 }
1336
1337 /*
1338 * Clear any previous faults.
1339 */
1340 dmar_fault(iommu->irq, iommu);
1341 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1342 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1343 }
1344
1345 return 0;
1346 }
1347
1348 /*
1349 * Re-enable Queued Invalidation interface.
1350 */
1351 int dmar_reenable_qi(struct intel_iommu *iommu)
1352 {
1353 if (!ecap_qis(iommu->ecap))
1354 return -ENOENT;
1355
1356 if (!iommu->qi)
1357 return -ENOENT;
1358
1359 /*
1360 * First disable queued invalidation.
1361 */
1362 dmar_disable_qi(iommu);
1363 /*
1364 * Then enable queued invalidation again. Since there is no pending
1365 * invalidation requests now, it's safe to re-enable queued
1366 * invalidation.
1367 */
1368 __dmar_enable_qi(iommu);
1369
1370 return 0;
1371 }
1372
1373 /*
1374 * Check interrupt remapping support in DMAR table description.
1375 */
1376 int __init dmar_ir_support(void)
1377 {
1378 struct acpi_table_dmar *dmar;
1379 dmar = (struct acpi_table_dmar *)dmar_tbl;
1380 if (!dmar)
1381 return 0;
1382 return dmar->flags & 0x1;
1383 }
1384
1385 static int __init dmar_free_unused_resources(void)
1386 {
1387 struct dmar_drhd_unit *dmaru, *dmaru_n;
1388
1389 /* DMAR units are in use */
1390 if (irq_remapping_enabled || intel_iommu_enabled)
1391 return 0;
1392
1393 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1394 list_del(&dmaru->list);
1395 dmar_free_drhd(dmaru);
1396 }
1397
1398 return 0;
1399 }
1400
1401 late_initcall(dmar_free_unused_resources);
1402 IOMMU_INIT_POST(detect_intel_iommu);