iommu/vt-d: Include ACPI devices in iommu=pt
drivers/iommu/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL at the tail, so that a scan of the list will
         * find it at the very end.
         */
        if (drhd->include_all)
                list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
        else
                list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
        struct acpi_dmar_device_scope *scope;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
        if (*cnt == 0)
                return NULL;

        return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
        int i;
        struct device *tmp_dev;

        if (*devices && *cnt) {
                for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
                        put_device(tmp_dev);
                kfree(*devices);
        }

        *devices = NULL;
        *cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
        int level = 0;
        size_t size;
        struct pci_dev *tmp;
        struct dmar_pci_notify_info *info;

        BUG_ON(dev->is_virtfn);

        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;

        size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
                info = kzalloc(size, GFP_KERNEL);
                if (!info) {
                        pr_warn("Out of memory when allocating notify_info for %s.\n",
                                pci_name(dev));
                        if (dmar_dev_scope_status == 0)
                                dmar_dev_scope_status = -ENOMEM;
                        return NULL;
                }
        }

        info->event = event;
        info->dev = dev;
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        /* decrement per hop so path[] is filled leaf-last */
                        level--;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
                                info->bus = tmp->bus->number;
                }
        }

        return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
        if ((void *)info != dmar_pci_notify_info_buf)
                kfree(info);
}
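
/*
 * Worked example (hypothetical topology, not from the original source):
 * for an endpoint 0000:01:00.0 behind root-port 0000:00:1c.0, the
 * counting loop above yields level == 2, and the fill loop walks
 * leaf-to-root so that path[1] = {0x00, 0} (the endpoint) and
 * path[0] = {0x1c, 0} (the port), with info->bus set to the root bus
 * number 0.  This matches the bus/path layout of ACPI device scope
 * entries, which dmar_match_pci_path() below compares hop by hop.
 */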

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
                                struct acpi_dmar_pci_path *path, int count)
{
        int i;

        if (info->bus != bus)
                return false;
        if (info->level != count)
                return false;

        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
                        return false;
        }

        return true;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                          void *start, void *end, u16 segment,
                          struct dmar_dev_scope *devices,
                          int devices_cnt)
{
        int i, level;
        struct device *tmp, *dev = &info->dev->dev;
        struct acpi_dmar_device_scope *scope;
        struct acpi_dmar_pci_path *path;

        if (segment != info->seg)
                return 0;

        for (; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        continue;

                path = (struct acpi_dmar_pci_path *)(scope + 1);
                level = (scope->length - sizeof(*scope)) / sizeof(*path);
                if (!dmar_match_pci_path(info, scope->bus, path, level))
                        continue;

                if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
                    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
                }

                for_each_dev_scope(devices, devices_cnt, i, tmp)
                        if (tmp == NULL) {
                                devices[i].bus = info->dev->bus->number;
                                devices[i].devfn = info->dev->devfn;
                                rcu_assign_pointer(devices[i].dev,
                                                   get_device(dev));
                                return 1;
                        }
                BUG_ON(i >= devices_cnt);
        }

        return 0;
}
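
/*
 * Note on the type check in dmar_insert_dev_scope(): an ENDPOINT scope
 * entry must name a device with a normal PCI header and a BRIDGE entry
 * must name a PCI-PCI bridge, so the two tests must agree; when exactly
 * one of them holds (the XOR), the firmware's device scope contradicts
 * the hardware and the entry is rejected with -EINVAL.
 */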

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
                          struct dmar_dev_scope *devices, int count)
{
        int index;
        struct device *tmp;

        if (info->seg != segment)
                return 0;

        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == &info->dev->dev) {
                        rcu_assign_pointer(devices[index].dev, NULL);
                        synchronize_rcu();
                        put_device(tmp);
                        return 1;
                }

        return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        for_each_drhd_unit(dmaru) {
                if (dmaru->include_all)
                        continue;

                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit, header);
                ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                dmaru->segment,
                                dmaru->devices, dmaru->devices_cnt);
                if (ret != 0)
                        break;
        }
        if (ret >= 0)
                ret = dmar_iommu_notify_scope_dev(info);
        if (ret < 0 && dmar_dev_scope_status == 0)
                dmar_dev_scope_status = ret;

        return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
        struct dmar_drhd_unit *dmaru;

        for_each_drhd_unit(dmaru)
                if (dmar_remove_dev_scope(info, dmaru->segment,
                                          dmaru->devices, dmaru->devices_cnt))
                        break;
        dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct pci_dev *pdev = to_pci_dev(data);
        struct dmar_pci_notify_info *info;

        /* Only care about add/remove events for physical functions */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
        if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
                return NOTIFY_DONE;

        info = dmar_alloc_pci_notify_info(pdev, action);
        if (!info)
                return NOTIFY_DONE;

        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);

        dmar_free_pci_notify_info(info);

        return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
        .priority = INT_MIN,
};
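
/*
 * The notifier above is registered against pci_bus_type in
 * dmar_dev_scope_init(); its INT_MIN priority places it at the tail of
 * the notifier chain, so it sees add/remove events only after all
 * higher-priority subscribers have run.
 */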

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
        dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
                                              ((void *)drhd) + drhd->header.length,
                                              &dmaru->devices_cnt);
        if (dmaru->devices_cnt && dmaru->devices == NULL) {
                kfree(dmaru);
                return -ENOMEM;
        }

        ret = alloc_iommu(dmaru);
        if (ret) {
                dmar_free_dev_scope(&dmaru->devices,
                                    &dmaru->devices_cnt);
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
        if (dmaru->devices && dmaru->devices_cnt)
                dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
        if (dmaru->iommu)
                free_iommu(dmaru->iommu);
        kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_andd *andd = (void *)header;

        /* Check for NUL termination within the designated length */
        if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
                WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
                andd->object_name);

        return 0;
}
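
/*
 * Note on the strnlen() bound used above: the fixed portion of an ANDD
 * structure is 8 bytes (4-byte DMAR sub-table header, 3 reserved bytes,
 * 1-byte device number), so header->length - 8 is exactly the space
 * available for the NUL-terminated object name.
 */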

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN_TAINT(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif
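
/*
 * RHSA entries bind a remapping unit's register base address to a NUMA
 * proximity domain.  Recording the node in drhd->iommu->node lets later
 * per-IOMMU allocations, such as the invalidation queue page obtained
 * via alloc_pages_node() in dmar_enable_qi(), stay local to that node.
 */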

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                        (unsigned long long)rhsa->base_address,
                        rhsa->proximity_domain);
                break;
        case ACPI_DMAR_TYPE_ANDD:
                /*
                 * We don't print this here because we need to sanity-check
                 * it first. So print it in dmar_parse_one_andd() instead.
                 */
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* If we can find the DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;
        int drhd_count = 0;

        /*
         * Do it again, since the earlier dmar_tbl mapping could have been
         * done with a fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use DMAR copy
         * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        drhd_count++;
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                        break;
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                case ACPI_DMAR_TYPE_ANDD:
                        ret = dmar_parse_one_andd(entry_header);
                        break;
                default:
                        pr_warn("Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        if (drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
        return ret;
}
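
/*
 * Sketch of the layout walked above (entry order and counts vary by
 * platform): the table starts with struct acpi_table_dmar (haw, flags),
 * followed by a packed sequence of sub-tables - DRHD, RMRR, ATSR, RHSA
 * and ANDD - each beginning with a common {type, length} header.  The
 * cursor advances by entry_header->length, which is why a zero length
 * must be rejected explicitly to avoid spinning forever.
 */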

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
                                 int cnt, struct pci_dev *dev)
{
        int index;
        struct device *tmp;

        while (dev) {
                for_each_active_dev_scope(devices, cnt, index, tmp)
                        if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        rcu_read_lock();
        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        goto out;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        goto out;
        }
        dmaru = NULL;
out:
        rcu_read_unlock();

        return dmaru;
}
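
/*
 * Typical use (illustrative sketch, not a call site in this file):
 *
 *        drhd = dmar_find_matched_drhd_unit(pdev);
 *        if (drhd)
 *                iommu = drhd->iommu;
 *
 * Virtual functions are resolved through their physical function via
 * pci_physfn(), since device scopes only enumerate physical devices.
 */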

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
                                              struct acpi_device *adev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        struct device *tmp;
        int i;
        struct acpi_dmar_pci_path *path;

        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                for (scope = (void *)(drhd + 1);
                     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
                     scope = ((void *)scope) + scope->length) {
                        if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
                                continue;
                        if (scope->enumeration_id != device_number)
                                continue;

                        path = (void *)(scope + 1);
                        pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
                                dev_name(&adev->dev), dmaru->reg_base_addr,
                                scope->bus, path->device, path->function);
                        for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
                                if (tmp == NULL) {
                                        dmaru->devices[i].bus = scope->bus;
                                        dmaru->devices[i].devfn = PCI_DEVFN(path->device,
                                                                            path->function);
                                        rcu_assign_pointer(dmaru->devices[i].dev,
                                                           get_device(&adev->dev));
                                        return;
                                }
                        BUG_ON(i >= dmaru->devices_cnt);
                }
        }
        pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
                device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
        struct acpi_dmar_andd *andd;

        /*
         * The cursor advances in the loop header, so a "continue" on a
         * failed lookup still moves on to the next entry instead of
         * revisiting the same one forever.
         */
        for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
             ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
             andd = ((void *)andd) + andd->header.length) {
                if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
                        acpi_handle h;
                        struct acpi_device *adev = NULL;

                        if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
                                                          andd->object_name,
                                                          &h))) {
                                pr_err("Failed to find handle for ACPI object %s\n",
                                       andd->object_name);
                                continue;
                        }
                        acpi_bus_get_device(h, &adev);
                        if (!adev) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->object_name);
                                continue;
                        }
                        dmar_acpi_insert_dev_scope(andd->device_number, adev);
                }
        }
        return 0;
}

int __init dmar_dev_scope_init(void)
{
        struct pci_dev *dev = NULL;
        struct dmar_pci_notify_info *info;

        if (dmar_dev_scope_status != 1)
                return dmar_dev_scope_status;

        dmar_acpi_dev_scope_init();

        if (list_empty(&dmar_drhd_units)) {
                dmar_dev_scope_status = -ENODEV;
        } else {
                dmar_dev_scope_status = 0;

                for_each_pci_dev(dev) {
                        if (dev->is_virtfn)
                                continue;

                        info = dmar_alloc_pci_notify_info(dev,
                                        BUS_NOTIFY_ADD_DEVICE);
                        if (!info) {
                                return dmar_dev_scope_status;
                        } else {
                                dmar_pci_bus_add_dev(info);
                                dmar_free_pci_notify_info(info);
                        }
                }

                bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
        }

        return dmar_dev_scope_status;
}
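
/*
 * Initialization flow, in summary: dmar_dev_scope_init() runs once.  It
 * first binds ACPI devices named by ANDD entries, then replays every
 * existing physical PCI device as a synthetic BUS_NOTIFY_ADD_DEVICE
 * event, and finally registers dmar_pci_bus_nb so that subsequent
 * hotplug keeps the cached device scopes current.
 */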

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized == 0) {
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
                                pr_info("Parsing DMAR table failed\n");
                } else if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
                }

                if (ret < 0)
                        dmar_table_initialized = ret;
                else
                        dmar_table_initialized = 1;
        }

        return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        void __iomem *addr;
                        u64 cap, ecap;

                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                warn_invalid_dmar(0, "");
                                goto failed;
                        }

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                        if (!addr) {
                                pr_warn("IOMMU: can't validate: %llx\n",
                                        drhd->address);
                                goto failed;
                        }
                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                warn_invalid_dmar(drhd->address,
                                                  " returns all ones");
                                goto failed;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;

failed:
        return 0;
}
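
/*
 * check_zero_address() probes each DRHD with early_ioremap() before any
 * IOMMU is set up: a register base of zero, or CAP/ECAP reading back as
 * all ones, means firmware advertised a remapping unit that is not
 * actually decoded, so DMAR support is not trusted on such a machine.
 */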

int __init detect_intel_iommu(void)
{
        int ret;

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();

        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

        early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
        up_write(&dmar_global_lock);

        return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("IOMMU: can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("IOMMU: can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("IOMMU: can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("IOMMU: can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}
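
/*
 * map_iommu() maps in two steps because the register file's true extent
 * is only discoverable from the registers themselves: a single
 * VTD_PAGE_SIZE page suffices to read CAP/ECAP, whose maximum IOTLB and
 * fault-register offsets then bound the real size.  As a hypothetical
 * example, an ECAP that places the IOTLB registers beyond offset 0x1000
 * pushes map_size to two pages after VTD_PAGE_ALIGN(), triggering the
 * remap path above.
 */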

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("IOMMU: failed to map %s\n", iommu->name);
                goto error;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
        iommu->segment = drhd->segment;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->seq_id,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

err_unmap:
        unmap_iommu(iommu);
error:
        kfree(iommu);
        return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
        if (iommu->irq) {
                free_irq(iommu->irq, iommu);
                irq_set_handler_data(iommu->irq, NULL);
                destroy_irq(iommu->irq);
        }

        if (iommu->qi) {
                free_page((unsigned long)iommu->qi->desc);
                kfree(iommu->qi->desc_status);
                kfree(iommu->qi);
        }

        if (iommu->reg)
                unmap_iommu(iommu);

        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
                               (unsigned long long)qi->desc[index].low,
                               (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                               sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                            sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}
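
/*
 * Recovery behaviour of qi_check_fault(), in short: on an Invalidation
 * Queue Error the offending descriptor itself is bad, so it is replaced
 * with a copy of the wait descriptor to let the queue drain while the
 * caller sees -EINVAL; a Time-out Error instead aborts every in-flight
 * wait descriptor, and qi_submit_sync() retries the whole submission
 * when it gets -EAGAIN.
 */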

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent interrupt
                 * context to queue another cmd while a cmd is already submitted
                 * and waiting for completion on this cpu. This is to avoid
                 * a deadlock where the interrupt context can wait indefinitely
                 * for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}
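
/*
 * Ring arithmetic (sketch): each submission consumes two of the
 * QI_LENGTH slots - the caller's descriptor at free_head plus a wait
 * descriptor right behind it, whose status write-back flips
 * desc_status[wait_index] to QI_DONE.  The "free_cnt < 3" spin demands
 * two free slots for the pair and keeps at least one slot spare, the
 * usual guard against a completely full ring becoming indistinguishable
 * from an empty one.
 */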

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DIOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}
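
/*
 * Address encoding example (hypothetical values): with VTD_PAGE_SHIFT
 * == 12 and mask == 2, the request covers 4 pages.  The statement
 *
 *        addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
 *
 * sets bits 12..0 of the page-aligned address, leaving bit 13 clear, so
 * the position of the lowest zero bit encodes the size of the span in
 * the size-encoded form selected by QI_DEV_IOTLB_SIZE.
 */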

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
               (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already setup and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
        "PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}

void dmar_msi_unmask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        unsigned long flag;

        /* unmask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
        unsigned long flag;
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

        /* mask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

1449
1450static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1451 u8 fault_reason, u16 source_id, unsigned long long addr)
1452{
1453 const char *reason;
9d783ba0 1454 int fault_type;
0ac2491f 1455
9d783ba0 1456 reason = dmar_get_fault_reason(fault_reason, &fault_type);
0ac2491f 1457
9d783ba0 1458 if (fault_type == INTR_REMAP)
bf947fcb 1459 pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
9d783ba0
SS
1460 "fault index %llx\n"
1461 "INTR-REMAP:[fault reason %02d] %s\n",
1462 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1463 PCI_FUNC(source_id & 0xFF), addr >> 48,
1464 fault_reason, reason);
1465 else
bf947fcb 1466 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
9d783ba0
SS
1467 "fault addr %llx \n"
1468 "DMAR:[fault reason %02d] %s\n",
1469 (type ? "DMA Read" : "DMA Write"),
1470 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1471 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
0ac2491f
SS
1472 return 0;
1473}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                pr_err("DRHD: handling fault status reg %x\n", fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto unlock_exit;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                             fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                             fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                        fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                       fault_index * PRIMARY_FAULT_REG_LEN + 12);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                  source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
        }

        writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}
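
/*
 * Fault record layout handled above, in summary: the recording area
 * starts at cap_fault_reg_offset() and holds cap_num_fault_regs()
 * records of PRIMARY_FAULT_REG_LEN (16) bytes each.  The F bit in a
 * record's top dword marks it valid; writing that bit back clears the
 * record, letting the scan wrap around the ring until no valid records
 * remain.
 */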

int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                pr_err("IOMMU: no free vectors\n");
                return -EINVAL;
        }

        irq_set_handler_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                irq_set_handler_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                pr_err("IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /*
         * Enable fault control interrupt.
         */
        for_each_iommu(iommu, drhd) {
                u32 fault_status;
                int ret = dmar_set_interrupt(iommu);

                if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
                fault_status = readl(iommu->reg + DMAR_FSTS_REG);
                writel(fault_status, iommu->reg + DMAR_FSTS_REG);
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no pending
         * invalidation requests now, it's safe to re-enable queued
         * invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
        struct dmar_drhd_unit *dmaru, *dmaru_n;

        /* DMAR units are in use */
        if (irq_remapping_enabled || intel_iommu_enabled)
                return 0;

        if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
                bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

        down_write(&dmar_global_lock);
        list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
                list_del(&dmaru->list);
                dmar_free_drhd(dmaru);
        }
        up_write(&dmar_global_lock);

        return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);