iommu/vt-d: Dynamically allocate and free seq_id for DMAR units
drivers/iommu/dmar.c
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 *
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/iova.h>
34#include <linux/intel-iommu.h>
35#include <linux/timer.h>
36#include <linux/irq.h>
37#include <linux/interrupt.h>
38#include <linux/tboot.h>
39#include <linux/dmi.h>
40#include <linux/slab.h>
41#include <linux/iommu.h>
42#include <asm/irq_remapping.h>
43#include <asm/iommu_table.h>
44
45#include "irq_remapping.h"
46
47typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
48struct dmar_res_callback {
49 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
50 void *arg[ACPI_DMAR_TYPE_RESERVED];
51 bool ignore_unhandled;
52 bool print_entry;
53};
54
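/*
 * Usage sketch (illustrative only; it mirrors what parse_dmar_table()
 * later in this file does): a caller fills one cb[]/arg[] slot per ACPI
 * DMAR structure type it cares about and hands the callback table to the
 * walker, e.g.:
 *
 *	int drhd_count = 0;
 *	struct dmar_res_callback cb = {
 *		.print_entry = true,
 *		.ignore_unhandled = true,
 *		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
 *	};
 *	ret = dmar_walk_dmar_table(dmar, &cb);
 */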
55/*
56 * Assumptions:
57 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
58 * before IO devices managed by that unit.
59 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
60 * after IO devices managed by that unit.
61 * 3) Hotplug events are rare.
62 *
63 * Locking rules for DMA and interrupt remapping related global data structures:
64 * 1) Use dmar_global_lock in process context
65 * 2) Use RCU in interrupt context
66 */
67DECLARE_RWSEM(dmar_global_lock);
68LIST_HEAD(dmar_drhd_units);
69
70struct acpi_table_header * __initdata dmar_tbl;
71static acpi_size dmar_tbl_size;
72static int dmar_dev_scope_status = 1;
73static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
74
75static int alloc_iommu(struct dmar_drhd_unit *drhd);
76static void free_iommu(struct intel_iommu *iommu);
77
78static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
79{
80 /*
81 * add INCLUDE_ALL at the tail, so that a scan of the list will find
82 * it at the very end.
83 */
84 if (drhd->include_all)
0e242612 85 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
10e5247f 86 else
0e242612 87 list_add_rcu(&drhd->list, &dmar_drhd_units);
88}
89
bb3a6b78 90void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
91{
92 struct acpi_dmar_device_scope *scope;
93
94 *cnt = 0;
95 while (start < end) {
96 scope = start;
83118b0d 97 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
07cb52ff 98 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
99 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
100 (*cnt)++;
101 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
102 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
e9071b0b 103 pr_warn("Unsupported device scope\n");
5715f0f9 104 }
105 start += scope->length;
106 }
107 if (*cnt == 0)
108 return NULL;
109
832bd858 110 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
111}
112
832bd858 113void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
ada4d4b2 114{
b683b230 115 int i;
832bd858 116 struct device *tmp_dev;
b683b230 117
ada4d4b2 118 if (*devices && *cnt) {
b683b230 119 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
832bd858 120 put_device(tmp_dev);
ada4d4b2 121 kfree(*devices);
ada4d4b2 122 }
123
124 *devices = NULL;
125 *cnt = 0;
126}
127
128/* Optimize out kzalloc()/kfree() for normal cases */
129static char dmar_pci_notify_info_buf[64];
130
131static struct dmar_pci_notify_info *
132dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
133{
134 int level = 0;
135 size_t size;
136 struct pci_dev *tmp;
137 struct dmar_pci_notify_info *info;
138
139 BUG_ON(dev->is_virtfn);
140
141 /* Only generate path[] for device addition event */
142 if (event == BUS_NOTIFY_ADD_DEVICE)
143 for (tmp = dev; tmp; tmp = tmp->bus->self)
144 level++;
145
146 size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
147 if (size <= sizeof(dmar_pci_notify_info_buf)) {
148 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
149 } else {
150 info = kzalloc(size, GFP_KERNEL);
151 if (!info) {
152 pr_warn("Out of memory when allocating notify_info "
153 "for %s.\n", pci_name(dev));
154 if (dmar_dev_scope_status == 0)
155 dmar_dev_scope_status = -ENOMEM;
156 return NULL;
157 }
158 }
159
160 info->event = event;
161 info->dev = dev;
162 info->seg = pci_domain_nr(dev->bus);
163 info->level = level;
164 if (event == BUS_NOTIFY_ADD_DEVICE) {
165 for (tmp = dev; tmp; tmp = tmp->bus->self) {
166 level--;
57384592 167 info->path[level].bus = tmp->bus->number;
168 info->path[level].device = PCI_SLOT(tmp->devfn);
169 info->path[level].function = PCI_FUNC(tmp->devfn);
170 if (pci_is_root_bus(tmp->bus))
171 info->bus = tmp->bus->number;
172 }
173 }
174
175 return info;
176}
177
178static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
179{
180 if ((void *)info != dmar_pci_notify_info_buf)
181 kfree(info);
182}
183
184static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
185 struct acpi_dmar_pci_path *path, int count)
186{
187 int i;
188
189 if (info->bus != bus)
80f7b3d1 190 goto fallback;
59ce0515 191 if (info->level != count)
80f7b3d1 192 goto fallback;
193
194 for (i = 0; i < count; i++) {
195 if (path[i].device != info->path[i].device ||
196 path[i].function != info->path[i].function)
80f7b3d1 197 goto fallback;
198 }
199
200 return true;
201
202fallback:
203
204 if (count != 1)
205 return false;
206
207 i = info->level - 1;
208 if (bus == info->path[i].bus &&
209 path[0].device == info->path[i].device &&
210 path[0].function == info->path[i].function) {
211 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
212 bus, path[0].device, path[0].function);
213 return true;
214 }
215
216 return false;
217}
218
219/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
220int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
221 void *start, void*end, u16 segment,
222 struct dmar_dev_scope *devices,
223 int devices_cnt)
224{
225 int i, level;
832bd858 226 struct device *tmp, *dev = &info->dev->dev;
227 struct acpi_dmar_device_scope *scope;
228 struct acpi_dmar_pci_path *path;
229
230 if (segment != info->seg)
231 return 0;
232
233 for (; start < end; start += scope->length) {
234 scope = start;
235 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
236 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
237 continue;
238
239 path = (struct acpi_dmar_pci_path *)(scope + 1);
240 level = (scope->length - sizeof(*scope)) / sizeof(*path);
241 if (!dmar_match_pci_path(info, scope->bus, path, level))
242 continue;
243
244 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
832bd858 245 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
59ce0515 246 pr_warn("Device scope type does not match for %s\n",
832bd858 247 pci_name(info->dev));
248 return -EINVAL;
249 }
250
251 for_each_dev_scope(devices, devices_cnt, i, tmp)
252 if (tmp == NULL) {
253 devices[i].bus = info->dev->bus->number;
254 devices[i].devfn = info->dev->devfn;
255 rcu_assign_pointer(devices[i].dev,
256 get_device(dev));
257 return 1;
258 }
259 BUG_ON(i >= devices_cnt);
260 }
261
262 return 0;
263}
264
265int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
832bd858 266 struct dmar_dev_scope *devices, int count)
267{
268 int index;
832bd858 269 struct device *tmp;
270
271 if (info->seg != segment)
272 return 0;
273
274 for_each_active_dev_scope(devices, count, index, tmp)
832bd858 275 if (tmp == &info->dev->dev) {
eecbad7d 276 RCU_INIT_POINTER(devices[index].dev, NULL);
59ce0515 277 synchronize_rcu();
832bd858 278 put_device(tmp);
279 return 1;
280 }
281
282 return 0;
283}
284
285static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
286{
287 int ret = 0;
288 struct dmar_drhd_unit *dmaru;
289 struct acpi_dmar_hardware_unit *drhd;
290
291 for_each_drhd_unit(dmaru) {
292 if (dmaru->include_all)
293 continue;
294
295 drhd = container_of(dmaru->hdr,
296 struct acpi_dmar_hardware_unit, header);
297 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
298 ((void *)drhd) + drhd->header.length,
299 dmaru->segment,
300 dmaru->devices, dmaru->devices_cnt);
301 if (ret != 0)
302 break;
303 }
304 if (ret >= 0)
305 ret = dmar_iommu_notify_scope_dev(info);
306 if (ret < 0 && dmar_dev_scope_status == 0)
307 dmar_dev_scope_status = ret;
308
309 return ret;
310}
311
312static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
313{
314 struct dmar_drhd_unit *dmaru;
315
316 for_each_drhd_unit(dmaru)
317 if (dmar_remove_dev_scope(info, dmaru->segment,
318 dmaru->devices, dmaru->devices_cnt))
319 break;
320 dmar_iommu_notify_scope_dev(info);
321}
322
323static int dmar_pci_bus_notifier(struct notifier_block *nb,
324 unsigned long action, void *data)
325{
326 struct pci_dev *pdev = to_pci_dev(data);
327 struct dmar_pci_notify_info *info;
328
329 /* Only care about add/remove events for physical functions */
330 if (pdev->is_virtfn)
331 return NOTIFY_DONE;
332 if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
333 return NOTIFY_DONE;
334
335 info = dmar_alloc_pci_notify_info(pdev, action);
336 if (!info)
337 return NOTIFY_DONE;
338
339 down_write(&dmar_global_lock);
340 if (action == BUS_NOTIFY_ADD_DEVICE)
341 dmar_pci_bus_add_dev(info);
342 else if (action == BUS_NOTIFY_DEL_DEVICE)
343 dmar_pci_bus_del_dev(info);
344 up_write(&dmar_global_lock);
345
346 dmar_free_pci_notify_info(info);
347
348 return NOTIFY_OK;
349}
350
351static struct notifier_block dmar_pci_bus_nb = {
352 .notifier_call = dmar_pci_bus_notifier,
353 .priority = INT_MIN,
354};
355
356/**
357 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
358 * structure which uniquely represents one DMA remapping hardware unit
359 * present in the platform
360 */
361static int __init
c2a0b538 362dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
363{
364 struct acpi_dmar_hardware_unit *drhd;
365 struct dmar_drhd_unit *dmaru;
366 int ret = 0;
10e5247f 367
e523b38e 368 drhd = (struct acpi_dmar_hardware_unit *)header;
369 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
370 if (!dmaru)
371 return -ENOMEM;
372
1886e8a9 373 dmaru->hdr = header;
10e5247f 374 dmaru->reg_base_addr = drhd->address;
276dbf99 375 dmaru->segment = drhd->segment;
10e5247f 376 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
377 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
378 ((void *)drhd) + drhd->header.length,
379 &dmaru->devices_cnt);
380 if (dmaru->devices_cnt && dmaru->devices == NULL) {
381 kfree(dmaru);
382 return -ENOMEM;
2e455289 383 }
10e5247f 384
385 ret = alloc_iommu(dmaru);
386 if (ret) {
387 dmar_free_dev_scope(&dmaru->devices,
388 &dmaru->devices_cnt);
389 kfree(dmaru);
390 return ret;
391 }
392 dmar_register_drhd_unit(dmaru);
393
394 if (arg)
395 (*(int *)arg)++;
396
397 return 0;
398}
399
400static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
401{
402 if (dmaru->devices && dmaru->devices_cnt)
403 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
404 if (dmaru->iommu)
405 free_iommu(dmaru->iommu);
406 kfree(dmaru);
407}
408
409static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
410 void *arg)
411{
412 struct acpi_dmar_andd *andd = (void *)header;
413
414 /* Check for NUL termination within the designated length */
83118b0d 415 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
416 WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
417 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
418 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
419 dmi_get_system_info(DMI_BIOS_VENDOR),
420 dmi_get_system_info(DMI_BIOS_VERSION),
421 dmi_get_system_info(DMI_PRODUCT_VERSION));
422 return -EINVAL;
423 }
424 pr_info("ANDD device: %x name: %s\n", andd->device_number,
83118b0d 425 andd->device_name);
426
427 return 0;
428}
429
aa697079 430#ifdef CONFIG_ACPI_NUMA
ee34b32d 431static int __init
c2a0b538 432dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
433{
434 struct acpi_dmar_rhsa *rhsa;
435 struct dmar_drhd_unit *drhd;
436
437 rhsa = (struct acpi_dmar_rhsa *)header;
aa697079 438 for_each_drhd_unit(drhd) {
439 if (drhd->reg_base_addr == rhsa->base_address) {
440 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
441
442 if (!node_online(node))
443 node = -1;
444 drhd->iommu->node = node;
445 return 0;
446 }
ee34b32d 447 }
448 WARN_TAINT(
449 1, TAINT_FIRMWARE_WORKAROUND,
450 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
451 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
452 drhd->reg_base_addr,
453 dmi_get_system_info(DMI_BIOS_VENDOR),
454 dmi_get_system_info(DMI_BIOS_VERSION),
455 dmi_get_system_info(DMI_PRODUCT_VERSION));
ee34b32d 456
aa697079 457 return 0;
ee34b32d 458}
459#else
460#define dmar_parse_one_rhsa dmar_res_noop
aa697079 461#endif
ee34b32d 462
463static void __init
464dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
465{
466 struct acpi_dmar_hardware_unit *drhd;
467 struct acpi_dmar_reserved_memory *rmrr;
aa5d2b51 468 struct acpi_dmar_atsr *atsr;
17b60977 469 struct acpi_dmar_rhsa *rhsa;
470
471 switch (header->type) {
472 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
473 drhd = container_of(header, struct acpi_dmar_hardware_unit,
474 header);
e9071b0b 475 pr_info("DRHD base: %#016Lx flags: %#x\n",
aa5d2b51 476 (unsigned long long)drhd->address, drhd->flags);
477 break;
478 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
479 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
480 header);
e9071b0b 481 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
482 (unsigned long long)rmrr->base_address,
483 (unsigned long long)rmrr->end_address);
10e5247f 484 break;
83118b0d 485 case ACPI_DMAR_TYPE_ROOT_ATS:
aa5d2b51 486 atsr = container_of(header, struct acpi_dmar_atsr, header);
e9071b0b 487 pr_info("ATSR flags: %#x\n", atsr->flags);
aa5d2b51 488 break;
83118b0d 489 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
17b60977 490 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
e9071b0b 491 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
492 (unsigned long long)rhsa->base_address,
493 rhsa->proximity_domain);
494 break;
83118b0d 495 case ACPI_DMAR_TYPE_NAMESPACE:
496 /* We don't print this here because we need to sanity-check
497 it first. So print it in dmar_parse_one_andd() instead. */
498 break;
499 }
500}
501
502/**
503 * dmar_table_detect - checks to see if the platform supports DMAR devices
504 */
505static int __init dmar_table_detect(void)
506{
507 acpi_status status = AE_OK;
508
509 /* if we could find DMAR table, then there are DMAR devices */
510 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
511 (struct acpi_table_header **)&dmar_tbl,
512 &dmar_tbl_size);
513
514 if (ACPI_SUCCESS(status) && !dmar_tbl) {
e9071b0b 515 pr_warn("Unable to map DMAR\n");
516 status = AE_NOT_FOUND;
517 }
518
519 return (ACPI_SUCCESS(status) ? 1 : 0);
520}
aaa9d1dd 521
522static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
523 size_t len, struct dmar_res_callback *cb)
524{
525 int ret = 0;
526 struct acpi_dmar_header *iter, *next;
527 struct acpi_dmar_header *end = ((void *)start) + len;
528
529 for (iter = start; iter < end && ret == 0; iter = next) {
530 next = (void *)iter + iter->length;
531 if (iter->length == 0) {
532 /* Avoid looping forever on bad ACPI tables */
533 pr_debug(FW_BUG "Invalid 0-length structure\n");
534 break;
535 } else if (next > end) {
536 /* Avoid passing table end */
537 pr_warn(FW_BUG "record passes table end\n");
538 ret = -EINVAL;
539 break;
540 }
541
542 if (cb->print_entry)
543 dmar_table_print_dmar_entry(iter);
544
545 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
546 /* continue for forward compatibility */
547 pr_debug("Unknown DMAR structure type %d\n",
548 iter->type);
549 } else if (cb->cb[iter->type]) {
550 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
551 } else if (!cb->ignore_unhandled) {
552 pr_warn("No handler for DMAR structure type %d\n",
553 iter->type);
554 ret = -EINVAL;
555 }
556 }
557
558 return ret;
559}
560
561static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
562 struct dmar_res_callback *cb)
563{
564 return dmar_walk_remapping_entries((void *)(dmar + 1),
565 dmar->header.length - sizeof(*dmar), cb);
566}
567
568/**
569 * parse_dmar_table - parses the DMA reporting table
570 */
571static int __init
572parse_dmar_table(void)
573{
574 struct acpi_table_dmar *dmar;
10e5247f 575 int ret = 0;
7cef3347 576 int drhd_count = 0;
577 struct dmar_res_callback cb = {
578 .print_entry = true,
579 .ignore_unhandled = true,
580 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
581 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
582 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
583 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
584 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
585 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
586 };
10e5247f 587
588 /*
589 * Do it again; the earlier dmar_tbl mapping may have been done with a
590 * fixed map.
591 */
592 dmar_table_detect();
593
594 /*
595 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
596 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
597 */
598 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
599
600 dmar = (struct acpi_table_dmar *)dmar_tbl;
601 if (!dmar)
602 return -ENODEV;
603
5b6985ce 604 if (dmar->width < PAGE_SHIFT - 1) {
e9071b0b 605 pr_warn("Invalid DMAR haw\n");
606 return -EINVAL;
607 }
608
e9071b0b 609 pr_info("Host address width %d\n", dmar->width + 1);
610 ret = dmar_walk_dmar_table(dmar, &cb);
611 if (ret == 0 && drhd_count == 0)
7cef3347 612 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
c2a0b538 613
614 return ret;
615}
616
617static int dmar_pci_device_match(struct dmar_dev_scope devices[],
618 int cnt, struct pci_dev *dev)
619{
620 int index;
832bd858 621 struct device *tmp;
622
623 while (dev) {
b683b230 624 for_each_active_dev_scope(devices, cnt, index, tmp)
832bd858 625 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
626 return 1;
627
628 /* Check our parent */
629 dev = dev->bus->self;
630 }
631
632 return 0;
633}
634
635struct dmar_drhd_unit *
636dmar_find_matched_drhd_unit(struct pci_dev *dev)
637{
0e242612 638 struct dmar_drhd_unit *dmaru;
639 struct acpi_dmar_hardware_unit *drhd;
640
641 dev = pci_physfn(dev);
642
0e242612 643 rcu_read_lock();
8b161f0e 644 for_each_drhd_unit(dmaru) {
645 drhd = container_of(dmaru->hdr,
646 struct acpi_dmar_hardware_unit,
647 header);
648
649 if (dmaru->include_all &&
650 drhd->segment == pci_domain_nr(dev->bus))
0e242612 651 goto out;
e61d98d8 652
653 if (dmar_pci_device_match(dmaru->devices,
654 dmaru->devices_cnt, dev))
0e242612 655 goto out;
e61d98d8 656 }
657 dmaru = NULL;
658out:
659 rcu_read_unlock();
e61d98d8 660
0e242612 661 return dmaru;
662}
663
664static void __init dmar_acpi_insert_dev_scope(u8 device_number,
665 struct acpi_device *adev)
666{
667 struct dmar_drhd_unit *dmaru;
668 struct acpi_dmar_hardware_unit *drhd;
669 struct acpi_dmar_device_scope *scope;
670 struct device *tmp;
671 int i;
672 struct acpi_dmar_pci_path *path;
673
674 for_each_drhd_unit(dmaru) {
675 drhd = container_of(dmaru->hdr,
676 struct acpi_dmar_hardware_unit,
677 header);
678
679 for (scope = (void *)(drhd + 1);
680 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
681 scope = ((void *)scope) + scope->length) {
83118b0d 682 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
683 continue;
684 if (scope->enumeration_id != device_number)
685 continue;
686
687 path = (void *)(scope + 1);
688 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
689 dev_name(&adev->dev), dmaru->reg_base_addr,
690 scope->bus, path->device, path->function);
691 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
692 if (tmp == NULL) {
693 dmaru->devices[i].bus = scope->bus;
694 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
695 path->function);
696 rcu_assign_pointer(dmaru->devices[i].dev,
697 get_device(&adev->dev));
698 return;
699 }
700 BUG_ON(i >= dmaru->devices_cnt);
701 }
702 }
703 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
704 device_number, dev_name(&adev->dev));
705}
706
707static int __init dmar_acpi_dev_scope_init(void)
708{
709 struct acpi_dmar_andd *andd;
710
711 if (dmar_tbl == NULL)
712 return -ENODEV;
713
714 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
715 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
716 andd = ((void *)andd) + andd->header.length) {
83118b0d 717 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
718 acpi_handle h;
719 struct acpi_device *adev;
720
721 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
83118b0d 722 andd->device_name,
723 &h))) {
724 pr_err("Failed to find handle for ACPI object %s\n",
83118b0d 725 andd->device_name);
726 continue;
727 }
c0df975f 728 if (acpi_bus_get_device(h, &adev)) {
ed40356b 729 pr_err("Failed to get device for ACPI object %s\n",
83118b0d 730 andd->device_name);
731 continue;
732 }
733 dmar_acpi_insert_dev_scope(andd->device_number, adev);
734 }
735 }
736 return 0;
737}
738
739int __init dmar_dev_scope_init(void)
740{
741 struct pci_dev *dev = NULL;
742 struct dmar_pci_notify_info *info;
1886e8a9 743
744 if (dmar_dev_scope_status != 1)
745 return dmar_dev_scope_status;
c2c7286a 746
747 if (list_empty(&dmar_drhd_units)) {
748 dmar_dev_scope_status = -ENODEV;
749 } else {
750 dmar_dev_scope_status = 0;
751
752 dmar_acpi_dev_scope_init();
753
754 for_each_pci_dev(dev) {
755 if (dev->is_virtfn)
756 continue;
757
758 info = dmar_alloc_pci_notify_info(dev,
759 BUS_NOTIFY_ADD_DEVICE);
760 if (!info) {
761 return dmar_dev_scope_status;
762 } else {
763 dmar_pci_bus_add_dev(info);
764 dmar_free_pci_notify_info(info);
765 }
766 }
318fe7df 767
2e455289 768 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
769 }
770
2e455289 771 return dmar_dev_scope_status;
772}
773
774
775int __init dmar_table_init(void)
776{
1886e8a9 777 static int dmar_table_initialized;
778 int ret;
779
780 if (dmar_table_initialized == 0) {
781 ret = parse_dmar_table();
782 if (ret < 0) {
783 if (ret != -ENODEV)
784 pr_info("parse DMAR table failure.\n");
785 } else if (list_empty(&dmar_drhd_units)) {
786 pr_info("No DMAR devices found\n");
787 ret = -ENODEV;
788 }
093f87d2 789
790 if (ret < 0)
791 dmar_table_initialized = ret;
792 else
793 dmar_table_initialized = 1;
10e5247f 794 }
093f87d2 795
cc05301f 796 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
797}
798
799static void warn_invalid_dmar(u64 addr, const char *message)
800{
801 WARN_TAINT_ONCE(
802 1, TAINT_FIRMWARE_WORKAROUND,
803 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
804 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
805 addr, message,
806 dmi_get_system_info(DMI_BIOS_VENDOR),
807 dmi_get_system_info(DMI_BIOS_VERSION),
808 dmi_get_system_info(DMI_PRODUCT_VERSION));
3a8663ee 809}
6ecbf01c 810
811static int __ref
812dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
86cf898e 813{
86cf898e 814 struct acpi_dmar_hardware_unit *drhd;
815 void __iomem *addr;
816 u64 cap, ecap;
86cf898e 817
818 drhd = (void *)entry;
819 if (!drhd->address) {
820 warn_invalid_dmar(0, "");
821 return -EINVAL;
822 }
2c992208 823
824 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
825 if (!addr) {
826 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
827 return -EINVAL;
828 }
829 cap = dmar_readq(addr + DMAR_CAP_REG);
830 ecap = dmar_readq(addr + DMAR_ECAP_REG);
831 early_iounmap(addr, VTD_PAGE_SIZE);
86cf898e 832
833 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
834 warn_invalid_dmar(drhd->address, " returns all ones");
835 return -EINVAL;
86cf898e 836 }
2c992208 837
2c992208 838 return 0;
839}
840
480125ba 841int __init detect_intel_iommu(void)
842{
843 int ret;
844 struct dmar_res_callback validate_drhd_cb = {
845 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
846 .ignore_unhandled = true,
847 };
2ae21010 848
3a5670e8 849 down_write(&dmar_global_lock);
f6dd5c31 850 ret = dmar_table_detect();
86cf898e 851 if (ret)
852 ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
853 &validate_drhd_cb);
854 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
855 iommu_detected = 1;
856 /* Make sure ACS will be enabled */
857 pci_request_acs();
858 }
f5d1b97b 859
9d5ce73a 860#ifdef CONFIG_X86
861 if (ret)
862 x86_init.iommu.iommu_init = intel_iommu_init;
2ae21010 863#endif
c2a0b538 864
b707cb02 865 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
f6dd5c31 866 dmar_tbl = NULL;
3a5670e8 867 up_write(&dmar_global_lock);
480125ba 868
4db77ff3 869 return ret ? 1 : -ENODEV;
870}
871
872
873static void unmap_iommu(struct intel_iommu *iommu)
874{
875 iounmap(iommu->reg);
876 release_mem_region(iommu->reg_phys, iommu->reg_size);
877}
878
879/**
880 * map_iommu: map the iommu's registers
881 * @iommu: the iommu to map
882 * @phys_addr: the physical address of the base register
883 *
884 * Memory map the iommu's registers. Start with a single page, and
885 * possibly expand if that turns out to be insufficient.
886 */
887static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
888{
889 int map_size, err=0;
890
891 iommu->reg_phys = phys_addr;
892 iommu->reg_size = VTD_PAGE_SIZE;
893
894 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
895 pr_err("IOMMU: can't reserve memory\n");
896 err = -EBUSY;
897 goto out;
898 }
899
900 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
901 if (!iommu->reg) {
902 pr_err("IOMMU: can't map the region\n");
903 err = -ENOMEM;
904 goto release;
905 }
906
907 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
908 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
909
910 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
911 err = -EINVAL;
912 warn_invalid_dmar(phys_addr, " returns all ones");
913 goto unmap;
914 }
915
916 /* the registers might be more than one page */
917 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
918 cap_max_fault_reg_offset(iommu->cap));
919 map_size = VTD_PAGE_ALIGN(map_size);
920 if (map_size > iommu->reg_size) {
921 iounmap(iommu->reg);
922 release_mem_region(iommu->reg_phys, iommu->reg_size);
923 iommu->reg_size = map_size;
924 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
925 iommu->name)) {
926 pr_err("IOMMU: can't reserve memory\n");
927 err = -EBUSY;
928 goto out;
929 }
930 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
931 if (!iommu->reg) {
932 pr_err("IOMMU: can't map the region\n");
933 err = -ENOMEM;
934 goto release;
935 }
936 }
937 err = 0;
938 goto out;
939
940unmap:
941 iounmap(iommu->reg);
942release:
943 release_mem_region(iommu->reg_phys, iommu->reg_size);
944out:
945 return err;
946}
947
948static int dmar_alloc_seq_id(struct intel_iommu *iommu)
949{
950 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
951 DMAR_UNITS_SUPPORTED);
952 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
953 iommu->seq_id = -1;
954 } else {
955 set_bit(iommu->seq_id, dmar_seq_ids);
956 sprintf(iommu->name, "dmar%d", iommu->seq_id);
957 }
958
959 return iommu->seq_id;
960}
961
962static void dmar_free_seq_id(struct intel_iommu *iommu)
963{
964 if (iommu->seq_id >= 0) {
965 clear_bit(iommu->seq_id, dmar_seq_ids);
966 iommu->seq_id = -1;
967 }
968}
969
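/*
 * Illustrative note on the seq_id scheme (assumed semantics, based only on
 * the two helpers above): dmar_seq_ids is a fixed-size bitmap, so a unit's
 * sequence number is handed out with find_first_zero_bit()/set_bit() and
 * returned with clear_bit(), which lets a hot-removed unit's slot be reused:
 *
 *	if (dmar_alloc_seq_id(iommu) < 0)	// all DMAR_UNITS_SUPPORTED ids busy
 *		return -ENOSPC;			// alloc_iommu() below does this
 *	...					// iommu->name is now "dmar<seq_id>"
 *	dmar_free_seq_id(iommu);		// seq_id may be recycled later
 */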
694835dc 970static int alloc_iommu(struct dmar_drhd_unit *drhd)
e61d98d8 971{
c42d9f32 972 struct intel_iommu *iommu;
3a93c841 973 u32 ver, sts;
43f7392b 974 int agaw = 0;
4ed0d3e6 975 int msagaw = 0;
6f5cf521 976 int err;
c42d9f32 977
6ecbf01c 978 if (!drhd->reg_base_addr) {
3a8663ee 979 warn_invalid_dmar(0, "");
980 return -EINVAL;
981 }
982
983 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
984 if (!iommu)
1886e8a9 985 return -ENOMEM;
c42d9f32 986
987 if (dmar_alloc_seq_id(iommu) < 0) {
988 pr_err("IOMMU: failed to allocate seq_id\n");
989 err = -ENOSPC;
990 goto error;
991 }
e61d98d8 992
993 err = map_iommu(iommu, drhd->reg_base_addr);
994 if (err) {
995 pr_err("IOMMU: failed to map %s\n", iommu->name);
78d8e704 996 goto error_free_seq_id;
e61d98d8 997 }
0815565a 998
6f5cf521 999 err = -EINVAL;
1000 agaw = iommu_calculate_agaw(iommu);
1001 if (agaw < 0) {
1002 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1003 iommu->seq_id);
0815565a 1004 goto err_unmap;
1005 }
1006 msagaw = iommu_calculate_max_sagaw(iommu);
1007 if (msagaw < 0) {
bf947fcb 1008 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1b573683 1009 iommu->seq_id);
0815565a 1010 goto err_unmap;
1011 }
1012 iommu->agaw = agaw;
4ed0d3e6 1013 iommu->msagaw = msagaw;
67ccac41 1014 iommu->segment = drhd->segment;
1b573683 1015
1016 iommu->node = -1;
1017
e61d98d8 1018 ver = readl(iommu->reg + DMAR_VER_REG);
1019 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1020 iommu->seq_id,
1021 (unsigned long long)drhd->reg_base_addr,
1022 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1023 (unsigned long long)iommu->cap,
1024 (unsigned long long)iommu->ecap);
e61d98d8 1025
1026 /* Reflect status in gcmd */
1027 sts = readl(iommu->reg + DMAR_GSTS_REG);
1028 if (sts & DMA_GSTS_IRES)
1029 iommu->gcmd |= DMA_GCMD_IRE;
1030 if (sts & DMA_GSTS_TES)
1031 iommu->gcmd |= DMA_GCMD_TE;
1032 if (sts & DMA_GSTS_QIES)
1033 iommu->gcmd |= DMA_GCMD_QIE;
1034
1f5b3c3f 1035 raw_spin_lock_init(&iommu->register_lock);
1036
1037 drhd->iommu = iommu;
1038
1039 if (intel_iommu_enabled)
1040 iommu->iommu_dev = iommu_device_create(NULL, iommu,
1041 intel_iommu_groups,
1042 iommu->name);
1043
1886e8a9 1044 return 0;
0815565a 1045
78d8e704 1046err_unmap:
6f5cf521 1047 unmap_iommu(iommu);
1048error_free_seq_id:
1049 dmar_free_seq_id(iommu);
1050error:
e61d98d8 1051 kfree(iommu);
6f5cf521 1052 return err;
1053}
1054
a868e6b7 1055static void free_iommu(struct intel_iommu *iommu)
e61d98d8 1056{
1057 iommu_device_destroy(iommu->iommu_dev);
1058
1059 if (iommu->irq) {
1060 free_irq(iommu->irq, iommu);
1061 irq_set_handler_data(iommu->irq, NULL);
a553b142 1062 dmar_free_hwirq(iommu->irq);
a868e6b7 1063 }
e61d98d8 1064
1065 if (iommu->qi) {
1066 free_page((unsigned long)iommu->qi->desc);
1067 kfree(iommu->qi->desc_status);
1068 kfree(iommu->qi);
1069 }
1070
e61d98d8 1071 if (iommu->reg)
1072 unmap_iommu(iommu);
1073
78d8e704 1074 dmar_free_seq_id(iommu);
1075 kfree(iommu);
1076}
1077
1078/*
1079 * Reclaim all the submitted descriptors which have completed their work.
1080 */
1081static inline void reclaim_free_desc(struct q_inval *qi)
1082{
1083 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1084 qi->desc_status[qi->free_tail] == QI_ABORT) {
1085 qi->desc_status[qi->free_tail] = QI_FREE;
1086 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1087 qi->free_cnt++;
1088 }
1089}
1090
1091static int qi_check_fault(struct intel_iommu *iommu, int index)
1092{
1093 u32 fault;
6ba6c3a4 1094 int head, tail;
1095 struct q_inval *qi = iommu->qi;
1096 int wait_index = (index + 1) % QI_LENGTH;
1097
1098 if (qi->desc_status[wait_index] == QI_ABORT)
1099 return -EAGAIN;
1100
1101 fault = readl(iommu->reg + DMAR_FSTS_REG);
1102
1103 /*
1104 * If IQE happens, the head points to the descriptor associated
1105 * with the error. No new descriptors are fetched until the IQE
1106 * is cleared.
1107 */
1108 if (fault & DMA_FSTS_IQE) {
1109 head = readl(iommu->reg + DMAR_IQH_REG);
6ba6c3a4 1110 if ((head >> DMAR_IQ_SHIFT) == index) {
bf947fcb 1111 pr_err("VT-d detected invalid descriptor: "
1112 "low=%llx, high=%llx\n",
1113 (unsigned long long)qi->desc[index].low,
1114 (unsigned long long)qi->desc[index].high);
1115 memcpy(&qi->desc[index], &qi->desc[wait_index],
1116 sizeof(struct qi_desc));
1117 __iommu_flush_cache(iommu, &qi->desc[index],
1118 sizeof(struct qi_desc));
1119 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1120 return -EINVAL;
1121 }
1122 }
1123
1124 /*
1125 * If ITE happens, all pending wait_desc commands are aborted.
1126 * No new descriptors are fetched until the ITE is cleared.
1127 */
1128 if (fault & DMA_FSTS_ITE) {
1129 head = readl(iommu->reg + DMAR_IQH_REG);
1130 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1131 head |= 1;
1132 tail = readl(iommu->reg + DMAR_IQT_REG);
1133 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1134
1135 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1136
1137 do {
1138 if (qi->desc_status[head] == QI_IN_USE)
1139 qi->desc_status[head] = QI_ABORT;
1140 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1141 } while (head != tail);
1142
1143 if (qi->desc_status[wait_index] == QI_ABORT)
1144 return -EAGAIN;
1145 }
1146
1147 if (fault & DMA_FSTS_ICE)
1148 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1149
1150 return 0;
1151}
1152
1153/*
1154 * Submit the queued invalidation descriptor to the remapping
1155 * hardware unit and wait for its completion.
1156 */
704126ad 1157int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
fe962e90 1158{
6ba6c3a4 1159 int rc;
1160 struct q_inval *qi = iommu->qi;
1161 struct qi_desc *hw, wait_desc;
1162 int wait_index, index;
1163 unsigned long flags;
1164
1165 if (!qi)
704126ad 1166 return 0;
1167
1168 hw = qi->desc;
1169
1170restart:
1171 rc = 0;
1172
3b8f4048 1173 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90 1174 while (qi->free_cnt < 3) {
3b8f4048 1175 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
fe962e90 1176 cpu_relax();
3b8f4048 1177 raw_spin_lock_irqsave(&qi->q_lock, flags);
1178 }
1179
1180 index = qi->free_head;
1181 wait_index = (index + 1) % QI_LENGTH;
1182
1183 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1184
1185 hw[index] = *desc;
1186
1187 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1188 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1189 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1190
1191 hw[wait_index] = wait_desc;
1192
1193 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
1194 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
1195
1196 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1197 qi->free_cnt -= 2;
1198
1199 /*
1200 * update the HW tail register indicating the presence of
1201 * new descriptors.
1202 */
6ba6c3a4 1203 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
1204
1205 while (qi->desc_status[wait_index] != QI_DONE) {
1206 /*
1207 * We will leave the interrupts disabled, to prevent the interrupt
1208 * context from queueing another cmd while a cmd is already submitted
1209 * and waiting for completion on this cpu. This is to avoid
1210 * a deadlock where the interrupt context can wait indefinitely
1211 * for free slots in the queue.
1212 */
1213 rc = qi_check_fault(iommu, index);
1214 if (rc)
6ba6c3a4 1215 break;
704126ad 1216
3b8f4048 1217 raw_spin_unlock(&qi->q_lock);
fe962e90 1218 cpu_relax();
3b8f4048 1219 raw_spin_lock(&qi->q_lock);
fe962e90 1220 }
1221
1222 qi->desc_status[index] = QI_DONE;
1223
1224 reclaim_free_desc(qi);
3b8f4048 1225 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
704126ad 1226
1227 if (rc == -EAGAIN)
1228 goto restart;
1229
704126ad 1230 return rc;
1231}
1232
1233/*
1234 * Flush the global interrupt entry cache.
1235 */
1236void qi_global_iec(struct intel_iommu *iommu)
1237{
1238 struct qi_desc desc;
1239
1240 desc.low = QI_IEC_TYPE;
1241 desc.high = 0;
1242
704126ad 1243 /* should never fail */
1244 qi_submit_sync(&desc, iommu);
1245}
1246
1247void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1248 u64 type)
3481f210 1249{
1250 struct qi_desc desc;
1251
1252 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1253 | QI_CC_GRAN(type) | QI_CC_TYPE;
1254 desc.high = 0;
1255
4c25a2c1 1256 qi_submit_sync(&desc, iommu);
1257}
1258
1259void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1260 unsigned int size_order, u64 type)
1261{
1262 u8 dw = 0, dr = 0;
1263
1264 struct qi_desc desc;
1265 int ih = 0;
1266
1267 if (cap_write_drain(iommu->cap))
1268 dw = 1;
1269
1270 if (cap_read_drain(iommu->cap))
1271 dr = 1;
1272
1273 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1274 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1275 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1276 | QI_IOTLB_AM(size_order);
1277
1f0ef2aa 1278 qi_submit_sync(&desc, iommu);
1279}
1280
1281void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1282 u64 addr, unsigned mask)
1283{
1284 struct qi_desc desc;
1285
1286 if (mask) {
1287 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1288 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1289 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1290 } else
1291 desc.high = QI_DEV_IOTLB_ADDR(addr);
1292
1293 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1294 qdep = 0;
1295
1296 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1297 QI_DIOTLB_TYPE;
1298
1299 qi_submit_sync(&desc, iommu);
1300}
1301
1302/*
1303 * Disable Queued Invalidation interface.
1304 */
1305void dmar_disable_qi(struct intel_iommu *iommu)
1306{
1307 unsigned long flags;
1308 u32 sts;
1309 cycles_t start_time = get_cycles();
1310
1311 if (!ecap_qis(iommu->ecap))
1312 return;
1313
1f5b3c3f 1314 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eba67e5d
SS
1315
1316 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1317 if (!(sts & DMA_GSTS_QIES))
1318 goto end;
1319
1320 /*
1321 * Give a chance to HW to complete the pending invalidation requests.
1322 */
1323 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1324 readl(iommu->reg + DMAR_IQH_REG)) &&
1325 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1326 cpu_relax();
1327
1328 iommu->gcmd &= ~DMA_GCMD_QIE;
1329 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1330
1331 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1332 !(sts & DMA_GSTS_QIES), sts);
1333end:
1f5b3c3f 1334 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1335}
1336
1337/*
1338 * Enable queued invalidation.
1339 */
1340static void __dmar_enable_qi(struct intel_iommu *iommu)
1341{
c416daa9 1342 u32 sts;
1343 unsigned long flags;
1344 struct q_inval *qi = iommu->qi;
1345
1346 qi->free_head = qi->free_tail = 0;
1347 qi->free_cnt = QI_LENGTH;
1348
1f5b3c3f 1349 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1350
1351 /* write zero to the tail reg */
1352 writel(0, iommu->reg + DMAR_IQT_REG);
1353
1354 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1355
eb4a52bc 1356 iommu->gcmd |= DMA_GCMD_QIE;
c416daa9 1357 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1358
1359 /* Make sure hardware complete it */
1360 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1361
1f5b3c3f 1362 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1363}
1364
1365/*
1366 * Enable Queued Invalidation interface. This is a must to support
1367 * interrupt-remapping. Also used by DMA-remapping, which replaces
1368 * register based IOTLB invalidation.
1369 */
1370int dmar_enable_qi(struct intel_iommu *iommu)
1371{
fe962e90 1372 struct q_inval *qi;
751cafe3 1373 struct page *desc_page;
1374
1375 if (!ecap_qis(iommu->ecap))
1376 return -ENOENT;
1377
1378 /*
1379 * queued invalidation is already setup and enabled.
1380 */
1381 if (iommu->qi)
1382 return 0;
1383
fa4b57cc 1384 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1385 if (!iommu->qi)
1386 return -ENOMEM;
1387
1388 qi = iommu->qi;
1389
1390
1391 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1392 if (!desc_page) {
fe962e90 1393 kfree(qi);
b707cb02 1394 iommu->qi = NULL;
1395 return -ENOMEM;
1396 }
1397
1398 qi->desc = page_address(desc_page);
1399
37a40710 1400 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1401 if (!qi->desc_status) {
1402 free_page((unsigned long) qi->desc);
1403 kfree(qi);
b707cb02 1404 iommu->qi = NULL;
1405 return -ENOMEM;
1406 }
1407
3b8f4048 1408 raw_spin_lock_init(&qi->q_lock);
fe962e90 1409
eb4a52bc 1410 __dmar_enable_qi(iommu);
1411
1412 return 0;
1413}
1414
1415 /* iommu interrupt handling. Most stuff is MSI-like. */
1416
1417enum faulttype {
1418 DMA_REMAP,
1419 INTR_REMAP,
1420 UNKNOWN,
1421};
1422
1423static const char *dma_remap_fault_reasons[] =
1424{
1425 "Software",
1426 "Present bit in root entry is clear",
1427 "Present bit in context entry is clear",
1428 "Invalid context entry",
1429 "Access beyond MGAW",
1430 "PTE Write access is not set",
1431 "PTE Read access is not set",
1432 "Next page table ptr is invalid",
1433 "Root table address invalid",
1434 "Context table ptr is invalid",
1435 "non-zero reserved fields in RTP",
1436 "non-zero reserved fields in CTP",
1437 "non-zero reserved fields in PTE",
4ecccd9e 1438 "PCE for translation request specifies blocking",
0ac2491f 1439};
9d783ba0 1440
95a02e97 1441static const char *irq_remap_fault_reasons[] =
1442{
1443 "Detected reserved fields in the decoded interrupt-remapped request",
1444 "Interrupt index exceeded the interrupt-remapping table size",
1445 "Present field in the IRTE entry is clear",
1446 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1447 "Detected reserved fields in the IRTE entry",
1448 "Blocked a compatibility format interrupt request",
1449 "Blocked an interrupt request due to source-id verification failure",
1450};
1451
21004dcd 1452static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f 1453{
1454 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1455 ARRAY_SIZE(irq_remap_fault_reasons))) {
9d783ba0 1456 *fault_type = INTR_REMAP;
95a02e97 1457 return irq_remap_fault_reasons[fault_reason - 0x20];
1458 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1459 *fault_type = DMA_REMAP;
1460 return dma_remap_fault_reasons[fault_reason];
1461 } else {
1462 *fault_type = UNKNOWN;
0ac2491f 1463 return "Unknown";
9d783ba0 1464 }
1465}
1466
5c2837fb 1467void dmar_msi_unmask(struct irq_data *data)
0ac2491f 1468{
dced35ae 1469 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1470 unsigned long flag;
1471
1472 /* unmask it */
1f5b3c3f 1473 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1474 writel(0, iommu->reg + DMAR_FECTL_REG);
1475 /* Read a reg to force flush the post write */
1476 readl(iommu->reg + DMAR_FECTL_REG);
1f5b3c3f 1477 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1478}
1479
5c2837fb 1480void dmar_msi_mask(struct irq_data *data)
1481{
1482 unsigned long flag;
dced35ae 1483 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1484
1485 /* mask it */
1f5b3c3f 1486 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1487 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1488 /* Read a reg to force flush the post write */
1489 readl(iommu->reg + DMAR_FECTL_REG);
1f5b3c3f 1490 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1491}
1492
1493void dmar_msi_write(int irq, struct msi_msg *msg)
1494{
dced35ae 1495 struct intel_iommu *iommu = irq_get_handler_data(irq);
1496 unsigned long flag;
1497
1f5b3c3f 1498 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1499 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1500 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1501 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1f5b3c3f 1502 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1503}
1504
1505void dmar_msi_read(int irq, struct msi_msg *msg)
1506{
dced35ae 1507 struct intel_iommu *iommu = irq_get_handler_data(irq);
1508 unsigned long flag;
1509
1f5b3c3f 1510 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1511 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1512 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1513 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1f5b3c3f 1514 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1515}
1516
1517static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1518 u8 fault_reason, u16 source_id, unsigned long long addr)
1519{
1520 const char *reason;
9d783ba0 1521 int fault_type;
0ac2491f 1522
9d783ba0 1523 reason = dmar_get_fault_reason(fault_reason, &fault_type);
0ac2491f 1524
9d783ba0 1525 if (fault_type == INTR_REMAP)
1526 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
1527 "fault index %llx\n"
1528 "INTR-REMAP:[fault reason %02d] %s\n",
1529 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1530 PCI_FUNC(source_id & 0xFF), addr >> 48,
1531 fault_reason, reason);
1532 else
bf947fcb 1533 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1534 "fault addr %llx \n"
1535 "DMAR:[fault reason %02d] %s\n",
1536 (type ? "DMA Read" : "DMA Write"),
1537 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1538 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1539 return 0;
1540}
1541
1542#define PRIMARY_FAULT_REG_LEN (16)
1531a6a6 1543irqreturn_t dmar_fault(int irq, void *dev_id)
1544{
1545 struct intel_iommu *iommu = dev_id;
1546 int reg, fault_index;
1547 u32 fault_status;
1548 unsigned long flag;
1549
1f5b3c3f 1550 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1551 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
9d783ba0 1552 if (fault_status)
bf947fcb 1553 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1554
1555 /* TBD: ignore advanced fault log currently */
1556 if (!(fault_status & DMA_FSTS_PPF))
bd5cdad0 1557 goto unlock_exit;
1558
1559 fault_index = dma_fsts_fault_record_index(fault_status);
1560 reg = cap_fault_reg_offset(iommu->cap);
1561 while (1) {
1562 u8 fault_reason;
1563 u16 source_id;
1564 u64 guest_addr;
1565 int type;
1566 u32 data;
1567
1568 /* highest 32 bits */
1569 data = readl(iommu->reg + reg +
1570 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1571 if (!(data & DMA_FRCD_F))
1572 break;
1573
1574 fault_reason = dma_frcd_fault_reason(data);
1575 type = dma_frcd_type(data);
1576
1577 data = readl(iommu->reg + reg +
1578 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1579 source_id = dma_frcd_source_id(data);
1580
1581 guest_addr = dmar_readq(iommu->reg + reg +
1582 fault_index * PRIMARY_FAULT_REG_LEN);
1583 guest_addr = dma_frcd_page_addr(guest_addr);
1584 /* clear the fault */
1585 writel(DMA_FRCD_F, iommu->reg + reg +
1586 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1587
1f5b3c3f 1588 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1589
1590 dmar_fault_do_one(iommu, type, fault_reason,
1591 source_id, guest_addr);
1592
1593 fault_index++;
8211a7b5 1594 if (fault_index >= cap_num_fault_regs(iommu->cap))
0ac2491f 1595 fault_index = 0;
1f5b3c3f 1596 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1597 }
0ac2491f 1598
1599 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1600
1601unlock_exit:
1f5b3c3f 1602 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1603 return IRQ_HANDLED;
1604}
1605
1606int dmar_set_interrupt(struct intel_iommu *iommu)
1607{
1608 int irq, ret;
1609
1610 /*
1611 * Check if the fault interrupt is already initialized.
1612 */
1613 if (iommu->irq)
1614 return 0;
1615
a553b142 1616 irq = dmar_alloc_hwirq();
aa5125a4 1617 if (irq <= 0) {
bf947fcb 1618 pr_err("IOMMU: no free vectors\n");
1619 return -EINVAL;
1620 }
1621
dced35ae 1622 irq_set_handler_data(irq, iommu);
1623 iommu->irq = irq;
1624
1625 ret = arch_setup_dmar_msi(irq);
1626 if (ret) {
dced35ae 1627 irq_set_handler_data(irq, NULL);
0ac2491f 1628 iommu->irq = 0;
a553b142 1629 dmar_free_hwirq(irq);
dd726435 1630 return ret;
1631 }
1632
477694e7 1633 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
0ac2491f 1634 if (ret)
bf947fcb 1635 pr_err("IOMMU: can't request irq\n");
1636 return ret;
1637}
1638
1639int __init enable_drhd_fault_handling(void)
1640{
1641 struct dmar_drhd_unit *drhd;
7c919779 1642 struct intel_iommu *iommu;
1643
1644 /*
1645 * Enable fault control interrupt.
1646 */
7c919779 1647 for_each_iommu(iommu, drhd) {
bd5cdad0 1648 u32 fault_status;
7c919779 1649 int ret = dmar_set_interrupt(iommu);
1650
1651 if (ret) {
1652 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1653 (unsigned long long)drhd->reg_base_addr, ret);
1654 return -1;
1655 }
1656
1657 /*
1658 * Clear any previous faults.
1659 */
1660 dmar_fault(iommu->irq, iommu);
1661 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1662 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1663 }
1664
1665 return 0;
1666}
1667
1668/*
1669 * Re-enable Queued Invalidation interface.
1670 */
1671int dmar_reenable_qi(struct intel_iommu *iommu)
1672{
1673 if (!ecap_qis(iommu->ecap))
1674 return -ENOENT;
1675
1676 if (!iommu->qi)
1677 return -ENOENT;
1678
1679 /*
1680 * First disable queued invalidation.
1681 */
1682 dmar_disable_qi(iommu);
1683 /*
1684 * Then enable queued invalidation again. Since there is no pending
1685 * invalidation requests now, it's safe to re-enable queued
1686 * invalidation.
1687 */
1688 __dmar_enable_qi(iommu);
1689
1690 return 0;
1691}
1692
1693/*
1694 * Check interrupt remapping support in DMAR table description.
1695 */
0b8973a8 1696int __init dmar_ir_support(void)
1697{
1698 struct acpi_table_dmar *dmar;
1699 dmar = (struct acpi_table_dmar *)dmar_tbl;
1700 if (!dmar)
1701 return 0;
1702 return dmar->flags & 0x1;
1703}
694835dc 1704
1705static int __init dmar_free_unused_resources(void)
1706{
1707 struct dmar_drhd_unit *dmaru, *dmaru_n;
1708
1709 /* DMAR units are in use */
1710 if (irq_remapping_enabled || intel_iommu_enabled)
1711 return 0;
1712
1713 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1714 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
59ce0515 1715
3a5670e8 1716 down_write(&dmar_global_lock);
1717 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1718 list_del(&dmaru->list);
1719 dmar_free_drhd(dmaru);
1720 }
3a5670e8 1721 up_write(&dmar_global_lock);
1722
1723 return 0;
1724}
1725
1726late_initcall(dmar_free_unused_resources);
4db77ff3 1727IOMMU_INIT_POST(detect_intel_iommu);