iommu/vt-d: Allocate space for ACPI devices
[deliverable/linux.git] drivers/iommu/dmar.c
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 *
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/iommu_table.h>
43
44 #include "irq_remapping.h"
45
46 /*
47 * Assumptions:
48 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
49 * before the IO devices managed by that unit.
50 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
51 * after the IO devices managed by that unit.
52 * 3) Hotplug events are rare.
53 *
54 * Locking rules for DMA and interrupt remapping related global data structures:
55 * 1) Use dmar_global_lock in process context
56 * 2) Use RCU in interrupt context
57 */
58 DECLARE_RWSEM(dmar_global_lock);
59 LIST_HEAD(dmar_drhd_units);
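
/*
 * Illustrative sketch only (not part of the original file): how the two
 * locking rules above look at a call site. Both helpers are hypothetical,
 * and the block is guarded by #if 0 so it is never compiled. They walk
 * dmar_drhd_units via for_each_drhd_unit(), an RCU list iteration in
 * this tree.
 */
#if 0
static void example_walk_process_context(void)
{
	struct dmar_drhd_unit *dmaru;

	down_read(&dmar_global_lock);	/* rule 1: process context */
	for_each_drhd_unit(dmaru)
		;			/* inspect dmaru under the rwsem */
	up_read(&dmar_global_lock);
}

static void example_walk_interrupt_context(void)
{
	struct dmar_drhd_unit *dmaru;

	rcu_read_lock();		/* rule 2: interrupt context */
	for_each_drhd_unit(dmaru)
		;			/* inspect dmaru under RCU */
	rcu_read_unlock();
}
#endif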
60
61 struct acpi_table_header * __initdata dmar_tbl;
62 static acpi_size dmar_tbl_size;
63 static int dmar_dev_scope_status = 1;
64
65 static int alloc_iommu(struct dmar_drhd_unit *drhd);
66 static void free_iommu(struct intel_iommu *iommu);
67
68 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
69 {
70 /*
71 * add INCLUDE_ALL at the tail, so scanning the list will find it at
72 * the very end.
73 */
74 if (drhd->include_all)
75 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
76 else
77 list_add_rcu(&drhd->list, &dmar_drhd_units);
78 }
79
80 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
81 {
82 struct acpi_dmar_device_scope *scope;
83
84 *cnt = 0;
85 while (start < end) {
86 scope = start;
87 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
88 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
89 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
90 (*cnt)++;
91 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
92 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
93 pr_warn("Unsupported device scope\n");
94 }
95 start += scope->length;
96 }
97 if (*cnt == 0)
98 return NULL;
99
100 return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
101 }
102
103 void dmar_free_dev_scope(struct pci_dev __rcu ***devices, int *cnt)
104 {
105 int i;
106 struct pci_dev *tmp_dev;
107
108 if (*devices && *cnt) {
109 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
110 pci_dev_put(tmp_dev);
111 kfree(*devices);
112 }
113
114 *devices = NULL;
115 *cnt = 0;
116 }
117
118 /* Optimize out kzalloc()/kfree() for normal cases */
119 static char dmar_pci_notify_info_buf[64];
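
/*
 * The 64-byte buffer is expected to hold sizeof(struct dmar_pci_notify_info)
 * plus a handful of acpi_dmar_pci_path entries, i.e. a device behind a small
 * number of bridges; deeper hierarchies fall back to kzalloc() in
 * dmar_alloc_pci_notify_info() below.
 */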
120
121 static struct dmar_pci_notify_info *
122 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
123 {
124 int level = 0;
125 size_t size;
126 struct pci_dev *tmp;
127 struct dmar_pci_notify_info *info;
128
129 BUG_ON(dev->is_virtfn);
130
131 /* Only generate path[] for device addition event */
132 if (event == BUS_NOTIFY_ADD_DEVICE)
133 for (tmp = dev; tmp; tmp = tmp->bus->self)
134 level++;
135
136 size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
137 if (size <= sizeof(dmar_pci_notify_info_buf)) {
138 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
139 } else {
140 info = kzalloc(size, GFP_KERNEL);
141 if (!info) {
142 pr_warn("Out of memory when allocating notify_info for %s.\n",
143 pci_name(dev));
144 if (dmar_dev_scope_status == 0)
145 dmar_dev_scope_status = -ENOMEM;
146 return NULL;
147 }
148 }
149
150 info->event = event;
151 info->dev = dev;
152 info->seg = pci_domain_nr(dev->bus);
153 info->level = level;
154 if (event == BUS_NOTIFY_ADD_DEVICE) {
155 for (tmp = dev; tmp; tmp = tmp->bus->self) {
156 info->path[--level].device = PCI_SLOT(tmp->devfn);
157 info->path[level].function = PCI_FUNC(tmp->devfn);
158 if (pci_is_root_bus(tmp->bus))
159 info->bus = tmp->bus->number;
160 }
161 }
162
163 return info;
164 }
165
166 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
167 {
168 if ((void *)info != dmar_pci_notify_info_buf)
169 kfree(info);
170 }
171
172 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
173 struct acpi_dmar_pci_path *path, int count)
174 {
175 int i;
176
177 if (info->bus != bus)
178 return false;
179 if (info->level != count)
180 return false;
181
182 for (i = 0; i < count; i++) {
183 if (path[i].device != info->path[i].device ||
184 path[i].function != info->path[i].function)
185 return false;
186 }
187
188 return true;
189 }
190
191 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
192 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
193 void *start, void *end, u16 segment,
194 struct pci_dev __rcu **devices, int devices_cnt)
195 {
196 int i, level;
197 struct pci_dev *tmp, *dev = info->dev;
198 struct acpi_dmar_device_scope *scope;
199 struct acpi_dmar_pci_path *path;
200
201 if (segment != info->seg)
202 return 0;
203
204 for (; start < end; start += scope->length) {
205 scope = start;
206 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
207 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
208 continue;
209
210 path = (struct acpi_dmar_pci_path *)(scope + 1);
211 level = (scope->length - sizeof(*scope)) / sizeof(*path);
212 if (!dmar_match_pci_path(info, scope->bus, path, level))
213 continue;
214
215 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
216 (dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
217 pr_warn("Device scope type does not match for %s\n",
218 pci_name(dev));
219 return -EINVAL;
220 }
221
222 for_each_dev_scope(devices, devices_cnt, i, tmp)
223 if (tmp == NULL) {
224 rcu_assign_pointer(devices[i],
225 pci_dev_get(dev));
226 return 1;
227 }
228 BUG_ON(i >= devices_cnt);
229 }
230
231 return 0;
232 }
233
234 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
235 struct pci_dev __rcu **devices, int count)
236 {
237 int index;
238 struct pci_dev *tmp;
239
240 if (info->seg != segment)
241 return 0;
242
243 for_each_active_dev_scope(devices, count, index, tmp)
244 if (tmp == info->dev) {
245 rcu_assign_pointer(devices[index], NULL);
246 synchronize_rcu();
247 pci_dev_put(tmp);
248 return 1;
249 }
250
251 return 0;
252 }
253
254 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
255 {
256 int ret = 0;
257 struct dmar_drhd_unit *dmaru;
258 struct acpi_dmar_hardware_unit *drhd;
259
260 for_each_drhd_unit(dmaru) {
261 if (dmaru->include_all)
262 continue;
263
264 drhd = container_of(dmaru->hdr,
265 struct acpi_dmar_hardware_unit, header);
266 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
267 ((void *)drhd) + drhd->header.length,
268 dmaru->segment,
269 dmaru->devices, dmaru->devices_cnt);
270 if (ret != 0)
271 break;
272 }
273 if (ret >= 0)
274 ret = dmar_iommu_notify_scope_dev(info);
275 if (ret < 0 && dmar_dev_scope_status == 0)
276 dmar_dev_scope_status = ret;
277
278 return ret;
279 }
280
281 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
282 {
283 struct dmar_drhd_unit *dmaru;
284
285 for_each_drhd_unit(dmaru)
286 if (dmar_remove_dev_scope(info, dmaru->segment,
287 dmaru->devices, dmaru->devices_cnt))
288 break;
289 dmar_iommu_notify_scope_dev(info);
290 }
291
292 static int dmar_pci_bus_notifier(struct notifier_block *nb,
293 unsigned long action, void *data)
294 {
295 struct pci_dev *pdev = to_pci_dev(data);
296 struct dmar_pci_notify_info *info;
297
298 /* Only care about add/remove events for physical functions */
299 if (pdev->is_virtfn)
300 return NOTIFY_DONE;
301 if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
302 return NOTIFY_DONE;
303
304 info = dmar_alloc_pci_notify_info(pdev, action);
305 if (!info)
306 return NOTIFY_DONE;
307
308 down_write(&dmar_global_lock);
309 if (action == BUS_NOTIFY_ADD_DEVICE)
310 dmar_pci_bus_add_dev(info);
311 else if (action == BUS_NOTIFY_DEL_DEVICE)
312 dmar_pci_bus_del_dev(info);
313 up_write(&dmar_global_lock);
314
315 dmar_free_pci_notify_info(info);
316
317 return NOTIFY_OK;
318 }
319
320 static struct notifier_block dmar_pci_bus_nb = {
321 .notifier_call = dmar_pci_bus_notifier,
322 .priority = INT_MIN,
323 };
324
325 /**
326 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
327 * structure which uniquely represents one DMA remapping hardware unit
328 * present in the platform
329 */
330 static int __init
331 dmar_parse_one_drhd(struct acpi_dmar_header *header)
332 {
333 struct acpi_dmar_hardware_unit *drhd;
334 struct dmar_drhd_unit *dmaru;
335 int ret = 0;
336
337 drhd = (struct acpi_dmar_hardware_unit *)header;
338 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
339 if (!dmaru)
340 return -ENOMEM;
341
342 dmaru->hdr = header;
343 dmaru->reg_base_addr = drhd->address;
344 dmaru->segment = drhd->segment;
345 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
346 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
347 ((void *)drhd) + drhd->header.length,
348 &dmaru->devices_cnt);
349 if (dmaru->devices_cnt && dmaru->devices == NULL) {
350 kfree(dmaru);
351 return -ENOMEM;
352 }
353
354 ret = alloc_iommu(dmaru);
355 if (ret) {
356 dmar_free_dev_scope(&dmaru->devices,
357 &dmaru->devices_cnt);
358 kfree(dmaru);
359 return ret;
360 }
361 dmar_register_drhd_unit(dmaru);
362 return 0;
363 }
364
365 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
366 {
367 if (dmaru->devices && dmaru->devices_cnt)
368 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
369 if (dmaru->iommu)
370 free_iommu(dmaru->iommu);
371 kfree(dmaru);
372 }
373
374 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
375 {
376 struct acpi_dmar_andd *andd = (void *)header;
377
378 /* Check for NUL termination within the designated length */
379 if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
380 WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
381 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
382 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
383 dmi_get_system_info(DMI_BIOS_VENDOR),
384 dmi_get_system_info(DMI_BIOS_VERSION),
385 dmi_get_system_info(DMI_PRODUCT_VERSION));
386 return -EINVAL;
387 }
388 pr_info("ANDD device: %x name: %s\n", andd->device_number,
389 andd->object_name);
390
391 return 0;
392 }
393
394 #ifdef CONFIG_ACPI_NUMA
395 static int __init
396 dmar_parse_one_rhsa(struct acpi_dmar_header *header)
397 {
398 struct acpi_dmar_rhsa *rhsa;
399 struct dmar_drhd_unit *drhd;
400
401 rhsa = (struct acpi_dmar_rhsa *)header;
402 for_each_drhd_unit(drhd) {
403 if (drhd->reg_base_addr == rhsa->base_address) {
404 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
405
406 if (!node_online(node))
407 node = -1;
408 drhd->iommu->node = node;
409 return 0;
410 }
411 }
412 WARN_TAINT(
413 1, TAINT_FIRMWARE_WORKAROUND,
414 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
415 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
416 rhsa->base_address,
417 dmi_get_system_info(DMI_BIOS_VENDOR),
418 dmi_get_system_info(DMI_BIOS_VERSION),
419 dmi_get_system_info(DMI_PRODUCT_VERSION));
420
421 return 0;
422 }
423 #endif
424
425 static void __init
426 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
427 {
428 struct acpi_dmar_hardware_unit *drhd;
429 struct acpi_dmar_reserved_memory *rmrr;
430 struct acpi_dmar_atsr *atsr;
431 struct acpi_dmar_rhsa *rhsa;
432
433 switch (header->type) {
434 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
435 drhd = container_of(header, struct acpi_dmar_hardware_unit,
436 header);
437 pr_info("DRHD base: %#016Lx flags: %#x\n",
438 (unsigned long long)drhd->address, drhd->flags);
439 break;
440 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
441 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
442 header);
443 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
444 (unsigned long long)rmrr->base_address,
445 (unsigned long long)rmrr->end_address);
446 break;
447 case ACPI_DMAR_TYPE_ATSR:
448 atsr = container_of(header, struct acpi_dmar_atsr, header);
449 pr_info("ATSR flags: %#x\n", atsr->flags);
450 break;
451 case ACPI_DMAR_HARDWARE_AFFINITY:
452 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
453 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
454 (unsigned long long)rhsa->base_address,
455 rhsa->proximity_domain);
456 break;
457 case ACPI_DMAR_TYPE_ANDD:
458 /* We don't print this here because we need to sanity-check
459 it first. So print it in dmar_parse_one_andd() instead. */
460 break;
461 }
462 }
463
464 /**
465 * dmar_table_detect - checks to see if the platform supports DMAR devices
466 */
467 static int __init dmar_table_detect(void)
468 {
469 acpi_status status = AE_OK;
470
471 /* if we can find the DMAR table, then there are DMAR devices */
472 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
473 (struct acpi_table_header **)&dmar_tbl,
474 &dmar_tbl_size);
475
476 if (ACPI_SUCCESS(status) && !dmar_tbl) {
477 pr_warn("Unable to map DMAR\n");
478 status = AE_NOT_FOUND;
479 }
480
481 return (ACPI_SUCCESS(status) ? 1 : 0);
482 }
483
484 /**
485 * parse_dmar_table - parses the DMA reporting table
486 */
487 static int __init
488 parse_dmar_table(void)
489 {
490 struct acpi_table_dmar *dmar;
491 struct acpi_dmar_header *entry_header;
492 int ret = 0;
493 int drhd_count = 0;
494
495 /*
496 * Do it again here: the earlier dmar_tbl mapping may have been
497 * set up with the fixed map.
498 */
499 dmar_table_detect();
500
501 /*
502 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
503 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
504 */
505 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
506
507 dmar = (struct acpi_table_dmar *)dmar_tbl;
508 if (!dmar)
509 return -ENODEV;
510
511 if (dmar->width < PAGE_SHIFT - 1) {
512 pr_warn("Invalid DMAR haw\n");
513 return -EINVAL;
514 }
515
516 pr_info("Host address width %d\n", dmar->width + 1);
517
518 entry_header = (struct acpi_dmar_header *)(dmar + 1);
519 while (((unsigned long)entry_header) <
520 (((unsigned long)dmar) + dmar_tbl->length)) {
521 /* Avoid looping forever on bad ACPI tables */
522 if (entry_header->length == 0) {
523 pr_warn("Invalid 0-length structure\n");
524 ret = -EINVAL;
525 break;
526 }
527
528 dmar_table_print_dmar_entry(entry_header);
529
530 switch (entry_header->type) {
531 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
532 drhd_count++;
533 ret = dmar_parse_one_drhd(entry_header);
534 break;
535 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
536 ret = dmar_parse_one_rmrr(entry_header);
537 break;
538 case ACPI_DMAR_TYPE_ATSR:
539 ret = dmar_parse_one_atsr(entry_header);
540 break;
541 case ACPI_DMAR_HARDWARE_AFFINITY:
542 #ifdef CONFIG_ACPI_NUMA
543 ret = dmar_parse_one_rhsa(entry_header);
544 #endif
545 break;
546 case ACPI_DMAR_TYPE_ANDD:
547 ret = dmar_parse_one_andd(entry_header);
548 break;
549 default:
550 pr_warn("Unknown DMAR structure type %d\n",
551 entry_header->type);
552 ret = 0; /* for forward compatibility */
553 break;
554 }
555 if (ret)
556 break;
557
558 entry_header = ((void *)entry_header + entry_header->length);
559 }
560 if (drhd_count == 0)
561 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
562 return ret;
563 }
564
565 static int dmar_pci_device_match(struct pci_dev __rcu *devices[], int cnt,
566 struct pci_dev *dev)
567 {
568 int index;
569 struct pci_dev *tmp;
570
571 while (dev) {
572 for_each_active_dev_scope(devices, cnt, index, tmp)
573 if (dev == tmp)
574 return 1;
575
576 /* Check our parent */
577 dev = dev->bus->self;
578 }
579
580 return 0;
581 }
582
583 struct dmar_drhd_unit *
584 dmar_find_matched_drhd_unit(struct pci_dev *dev)
585 {
586 struct dmar_drhd_unit *dmaru;
587 struct acpi_dmar_hardware_unit *drhd;
588
589 dev = pci_physfn(dev);
590
591 rcu_read_lock();
592 for_each_drhd_unit(dmaru) {
593 drhd = container_of(dmaru->hdr,
594 struct acpi_dmar_hardware_unit,
595 header);
596
597 if (dmaru->include_all &&
598 drhd->segment == pci_domain_nr(dev->bus))
599 goto out;
600
601 if (dmar_pci_device_match(dmaru->devices,
602 dmaru->devices_cnt, dev))
603 goto out;
604 }
605 dmaru = NULL;
606 out:
607 rcu_read_unlock();
608
609 return dmaru;
610 }
611
612 int __init dmar_dev_scope_init(void)
613 {
614 struct pci_dev *dev = NULL;
615 struct dmar_pci_notify_info *info;
616
617 if (dmar_dev_scope_status != 1)
618 return dmar_dev_scope_status;
619
620 if (list_empty(&dmar_drhd_units)) {
621 dmar_dev_scope_status = -ENODEV;
622 } else {
623 dmar_dev_scope_status = 0;
624
625 for_each_pci_dev(dev) {
626 if (dev->is_virtfn)
627 continue;
628
629 info = dmar_alloc_pci_notify_info(dev,
630 BUS_NOTIFY_ADD_DEVICE);
631 if (!info)
632 return dmar_dev_scope_status;
633 
634 dmar_pci_bus_add_dev(info);
635 dmar_free_pci_notify_info(info);
636 
637 }
638
639 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
640 }
641
642 return dmar_dev_scope_status;
643 }
644
645
646 int __init dmar_table_init(void)
647 {
648 static int dmar_table_initialized;
649 int ret;
650
651 if (dmar_table_initialized == 0) {
652 ret = parse_dmar_table();
653 if (ret < 0) {
654 if (ret != -ENODEV)
655 pr_info("Parsing DMAR table failed.\n");
656 } else if (list_empty(&dmar_drhd_units)) {
657 pr_info("No DMAR devices found\n");
658 ret = -ENODEV;
659 }
660
661 if (ret < 0)
662 dmar_table_initialized = ret;
663 else
664 dmar_table_initialized = 1;
665 }
666
667 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
668 }
669
670 static void warn_invalid_dmar(u64 addr, const char *message)
671 {
672 WARN_TAINT_ONCE(
673 1, TAINT_FIRMWARE_WORKAROUND,
674 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
675 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
676 addr, message,
677 dmi_get_system_info(DMI_BIOS_VENDOR),
678 dmi_get_system_info(DMI_BIOS_VERSION),
679 dmi_get_system_info(DMI_PRODUCT_VERSION));
680 }
681
682 static int __init check_zero_address(void)
683 {
684 struct acpi_table_dmar *dmar;
685 struct acpi_dmar_header *entry_header;
686 struct acpi_dmar_hardware_unit *drhd;
687
688 dmar = (struct acpi_table_dmar *)dmar_tbl;
689 entry_header = (struct acpi_dmar_header *)(dmar + 1);
690
691 while (((unsigned long)entry_header) <
692 (((unsigned long)dmar) + dmar_tbl->length)) {
693 /* Avoid looping forever on bad ACPI tables */
694 if (entry_header->length == 0) {
695 pr_warn("Invalid 0-length structure\n");
696 return 0;
697 }
698
699 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
700 void __iomem *addr;
701 u64 cap, ecap;
702
703 drhd = (void *)entry_header;
704 if (!drhd->address) {
705 warn_invalid_dmar(0, "");
706 goto failed;
707 }
708
709 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
710 if (!addr) {
711 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
712 goto failed;
713 }
714 cap = dmar_readq(addr + DMAR_CAP_REG);
715 ecap = dmar_readq(addr + DMAR_ECAP_REG);
716 early_iounmap(addr, VTD_PAGE_SIZE);
717 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
718 warn_invalid_dmar(drhd->address,
719 " returns all ones");
720 goto failed;
721 }
722 }
723
724 entry_header = ((void *)entry_header + entry_header->length);
725 }
726 return 1;
727
728 failed:
729 return 0;
730 }
731
732 int __init detect_intel_iommu(void)
733 {
734 int ret;
735
736 down_write(&dmar_global_lock);
737 ret = dmar_table_detect();
738 if (ret)
739 ret = check_zero_address();
740 
741 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
742 iommu_detected = 1;
743 /* Make sure ACS will be enabled */
744 pci_request_acs();
745 }
746 
747 #ifdef CONFIG_X86
748 if (ret)
749 x86_init.iommu.iommu_init = intel_iommu_init;
750 #endif
751 
752 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
753 dmar_tbl = NULL;
754 up_write(&dmar_global_lock);
755
756 return ret ? 1 : -ENODEV;
757 }
758
759
760 static void unmap_iommu(struct intel_iommu *iommu)
761 {
762 iounmap(iommu->reg);
763 release_mem_region(iommu->reg_phys, iommu->reg_size);
764 }
765
766 /**
767 * map_iommu: map the iommu's registers
768 * @iommu: the iommu to map
769 * @phys_addr: the physical address of the base register
770 *
771 * Memory map the iommu's registers. Start w/ a single page, and
772 * possibly expand if that turns out to be insufficient.
773 */
774 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
775 {
776 int map_size, err = 0;
777
778 iommu->reg_phys = phys_addr;
779 iommu->reg_size = VTD_PAGE_SIZE;
780
781 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
782 pr_err("IOMMU: can't reserve memory\n");
783 err = -EBUSY;
784 goto out;
785 }
786
787 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
788 if (!iommu->reg) {
789 pr_err("IOMMU: can't map the region\n");
790 err = -ENOMEM;
791 goto release;
792 }
793
794 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
795 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
796
797 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
798 err = -EINVAL;
799 warn_invalid_dmar(phys_addr, " returns all ones");
800 goto unmap;
801 }
802
803 /* the registers might be more than one page */
804 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
805 cap_max_fault_reg_offset(iommu->cap));
806 map_size = VTD_PAGE_ALIGN(map_size);
807 if (map_size > iommu->reg_size) {
808 iounmap(iommu->reg);
809 release_mem_region(iommu->reg_phys, iommu->reg_size);
810 iommu->reg_size = map_size;
811 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
812 iommu->name)) {
813 pr_err("IOMMU: can't reserve memory\n");
814 err = -EBUSY;
815 goto out;
816 }
817 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
818 if (!iommu->reg) {
819 pr_err("IOMMU: can't map the region\n");
820 err = -ENOMEM;
821 goto release;
822 }
823 }
824 err = 0;
825 goto out;
826
827 unmap:
828 iounmap(iommu->reg);
829 release:
830 release_mem_region(iommu->reg_phys, iommu->reg_size);
831 out:
832 return err;
833 }
834
835 static int alloc_iommu(struct dmar_drhd_unit *drhd)
836 {
837 struct intel_iommu *iommu;
838 u32 ver, sts;
839 static int iommu_allocated = 0;
840 int agaw = 0;
841 int msagaw = 0;
842 int err;
843
844 if (!drhd->reg_base_addr) {
845 warn_invalid_dmar(0, "");
846 return -EINVAL;
847 }
848
849 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
850 if (!iommu)
851 return -ENOMEM;
852
853 iommu->seq_id = iommu_allocated++;
854 sprintf(iommu->name, "dmar%d", iommu->seq_id);
855
856 err = map_iommu(iommu, drhd->reg_base_addr);
857 if (err) {
858 pr_err("IOMMU: failed to map %s\n", iommu->name);
859 goto error;
860 }
861
862 err = -EINVAL;
863 agaw = iommu_calculate_agaw(iommu);
864 if (agaw < 0) {
865 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
866 iommu->seq_id);
867 goto err_unmap;
868 }
869 msagaw = iommu_calculate_max_sagaw(iommu);
870 if (msagaw < 0) {
871 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
872 iommu->seq_id);
873 goto err_unmap;
874 }
875 iommu->agaw = agaw;
876 iommu->msagaw = msagaw;
877
878 iommu->node = -1;
879
880 ver = readl(iommu->reg + DMAR_VER_REG);
881 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
882 iommu->seq_id,
883 (unsigned long long)drhd->reg_base_addr,
884 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
885 (unsigned long long)iommu->cap,
886 (unsigned long long)iommu->ecap);
887
888 /* Reflect status in gcmd */
889 sts = readl(iommu->reg + DMAR_GSTS_REG);
890 if (sts & DMA_GSTS_IRES)
891 iommu->gcmd |= DMA_GCMD_IRE;
892 if (sts & DMA_GSTS_TES)
893 iommu->gcmd |= DMA_GCMD_TE;
894 if (sts & DMA_GSTS_QIES)
895 iommu->gcmd |= DMA_GCMD_QIE;
896
897 raw_spin_lock_init(&iommu->register_lock);
898
899 drhd->iommu = iommu;
900 return 0;
901
902 err_unmap:
903 unmap_iommu(iommu);
904 error:
905 kfree(iommu);
906 return err;
907 }
908
909 static void free_iommu(struct intel_iommu *iommu)
910 {
911 if (iommu->irq) {
912 free_irq(iommu->irq, iommu);
913 irq_set_handler_data(iommu->irq, NULL);
914 destroy_irq(iommu->irq);
915 }
916
917 if (iommu->qi) {
918 free_page((unsigned long)iommu->qi->desc);
919 kfree(iommu->qi->desc_status);
920 kfree(iommu->qi);
921 }
922
923 if (iommu->reg)
924 unmap_iommu(iommu);
925
926 kfree(iommu);
927 }
928
929 /*
930 * Reclaim all the submitted descriptors which have completed their work.
931 */
932 static inline void reclaim_free_desc(struct q_inval *qi)
933 {
934 while (qi->desc_status[qi->free_tail] == QI_DONE ||
935 qi->desc_status[qi->free_tail] == QI_ABORT) {
936 qi->desc_status[qi->free_tail] = QI_FREE;
937 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
938 qi->free_cnt++;
939 }
940 }
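
/*
 * Ring layout note (derived from qi_submit_sync() below): descriptors are
 * always posted as a work/wait pair and free_head advances by two from zero,
 * so work descriptors occupy even slots and their wait descriptors the odd
 * slots that follow. qi_check_fault() relies on this when it rounds head up
 * to the wait slot (head |= 1).
 */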
941
942 static int qi_check_fault(struct intel_iommu *iommu, int index)
943 {
944 u32 fault;
945 int head, tail;
946 struct q_inval *qi = iommu->qi;
947 int wait_index = (index + 1) % QI_LENGTH;
948
949 if (qi->desc_status[wait_index] == QI_ABORT)
950 return -EAGAIN;
951
952 fault = readl(iommu->reg + DMAR_FSTS_REG);
953
954 /*
955 * If IQE happens, the head points to the descriptor associated
956 * with the error. No new descriptors are fetched until the IQE
957 * is cleared.
958 */
959 if (fault & DMA_FSTS_IQE) {
960 head = readl(iommu->reg + DMAR_IQH_REG);
961 if ((head >> DMAR_IQ_SHIFT) == index) {
962 pr_err("VT-d detected invalid descriptor: "
963 "low=%llx, high=%llx\n",
964 (unsigned long long)qi->desc[index].low,
965 (unsigned long long)qi->desc[index].high);
966 memcpy(&qi->desc[index], &qi->desc[wait_index],
967 sizeof(struct qi_desc));
968 __iommu_flush_cache(iommu, &qi->desc[index],
969 sizeof(struct qi_desc));
970 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
971 return -EINVAL;
972 }
973 }
974
975 /*
976 * If ITE happens, all pending wait_desc commands are aborted.
977 * No new descriptors are fetched until the ITE is cleared.
978 */
979 if (fault & DMA_FSTS_ITE) {
980 head = readl(iommu->reg + DMAR_IQH_REG);
981 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
982 head |= 1;	/* wait descriptors occupy the odd slot of each pair */
983 tail = readl(iommu->reg + DMAR_IQT_REG);
984 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
985
986 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
987
988 do {
989 if (qi->desc_status[head] == QI_IN_USE)
990 qi->desc_status[head] = QI_ABORT;
991 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
992 } while (head != tail);
993
994 if (qi->desc_status[wait_index] == QI_ABORT)
995 return -EAGAIN;
996 }
997
998 if (fault & DMA_FSTS_ICE)
999 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1000
1001 return 0;
1002 }
1003
1004 /*
1005 * Submit the queued invalidation descriptor to the remapping
1006 * hardware unit and wait for its completion.
1007 */
1008 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
1009 {
1010 int rc;
1011 struct q_inval *qi = iommu->qi;
1012 struct qi_desc *hw, wait_desc;
1013 int wait_index, index;
1014 unsigned long flags;
1015
1016 if (!qi)
1017 return 0;
1018
1019 hw = qi->desc;
1020
1021 restart:
1022 rc = 0;
1023
1024 raw_spin_lock_irqsave(&qi->q_lock, flags);
1025 while (qi->free_cnt < 3) {
1026 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1027 cpu_relax();
1028 raw_spin_lock_irqsave(&qi->q_lock, flags);
1029 }
1030
1031 index = qi->free_head;
1032 wait_index = (index + 1) % QI_LENGTH;
1033
1034 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1035
1036 hw[index] = *desc;
1037
1038 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1039 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1040 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1041
1042 hw[wait_index] = wait_desc;
1043
1044 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
1045 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
1046
1047 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1048 qi->free_cnt -= 2;
1049
1050 /*
1051 * update the HW tail register indicating the presence of
1052 * new descriptors.
1053 */
1054 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
1055
1056 while (qi->desc_status[wait_index] != QI_DONE) {
1057 /*
1058 * We leave the interrupts disabled to prevent the interrupt
1059 * context from queueing another cmd while one is already submitted
1060 * and waiting for completion on this cpu. This is to avoid
1061 * a deadlock where the interrupt context can wait indefinitely
1062 * for free slots in the queue.
1063 */
1064 rc = qi_check_fault(iommu, index);
1065 if (rc)
1066 break;
1067
1068 raw_spin_unlock(&qi->q_lock);
1069 cpu_relax();
1070 raw_spin_lock(&qi->q_lock);
1071 }
1072
1073 qi->desc_status[index] = QI_DONE;
1074
1075 reclaim_free_desc(qi);
1076 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1077
1078 if (rc == -EAGAIN)
1079 goto restart;
1080
1081 return rc;
1082 }
1083
1084 /*
1085 * Flush the global interrupt entry cache.
1086 */
1087 void qi_global_iec(struct intel_iommu *iommu)
1088 {
1089 struct qi_desc desc;
1090
1091 desc.low = QI_IEC_TYPE;
1092 desc.high = 0;
1093
1094 /* should never fail */
1095 qi_submit_sync(&desc, iommu);
1096 }
1097
1098 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1099 u64 type)
1100 {
1101 struct qi_desc desc;
1102
1103 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1104 | QI_CC_GRAN(type) | QI_CC_TYPE;
1105 desc.high = 0;
1106
1107 qi_submit_sync(&desc, iommu);
1108 }
1109
1110 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1111 unsigned int size_order, u64 type)
1112 {
1113 u8 dw = 0, dr = 0;
1114
1115 struct qi_desc desc;
1116 int ih = 0;
1117
1118 if (cap_write_drain(iommu->cap))
1119 dw = 1;
1120
1121 if (cap_read_drain(iommu->cap))
1122 dr = 1;
1123
1124 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1125 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1126 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1127 | QI_IOTLB_AM(size_order);
1128
1129 qi_submit_sync(&desc, iommu);
1130 }
1131
1132 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1133 u64 addr, unsigned mask)
1134 {
1135 struct qi_desc desc;
1136
1137 if (mask) {
1138 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1139 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1140 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1141 } else
1142 desc.high = QI_DEV_IOTLB_ADDR(addr);
1143
1144 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1145 qdep = 0;
1146
1147 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1148 QI_DIOTLB_TYPE;
1149
1150 qi_submit_sync(&desc, iommu);
1151 }
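
/*
 * Worked example for the mask encoding above (illustrative, assuming
 * VTD_PAGE_SHIFT == 12): for mask == 2 the BUG_ON enforces 16KB alignment,
 * and OR-ing in (1 << 13) - 1 sets address bit 12 while leaving bit 13
 * clear. Per the VT-d device-IOTLB descriptor format, the position of the
 * least significant zero bit encodes the range, here 1 << 2 = 4 pages.
 */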
1152
1153 /*
1154 * Disable Queued Invalidation interface.
1155 */
1156 void dmar_disable_qi(struct intel_iommu *iommu)
1157 {
1158 unsigned long flags;
1159 u32 sts;
1160 cycles_t start_time = get_cycles();
1161
1162 if (!ecap_qis(iommu->ecap))
1163 return;
1164
1165 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1166
1167 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1168 if (!(sts & DMA_GSTS_QIES))
1169 goto end;
1170
1171 /*
1172 * Give a chance to HW to complete the pending invalidation requests.
1173 */
1174 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1175 readl(iommu->reg + DMAR_IQH_REG)) &&
1176 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1177 cpu_relax();
1178
1179 iommu->gcmd &= ~DMA_GCMD_QIE;
1180 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1181
1182 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1183 !(sts & DMA_GSTS_QIES), sts);
1184 end:
1185 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1186 }
1187
1188 /*
1189 * Enable queued invalidation.
1190 */
1191 static void __dmar_enable_qi(struct intel_iommu *iommu)
1192 {
1193 u32 sts;
1194 unsigned long flags;
1195 struct q_inval *qi = iommu->qi;
1196
1197 qi->free_head = qi->free_tail = 0;
1198 qi->free_cnt = QI_LENGTH;
1199
1200 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1201
1202 /* write zero to the tail reg */
1203 writel(0, iommu->reg + DMAR_IQT_REG);
1204
1205 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1206
1207 iommu->gcmd |= DMA_GCMD_QIE;
1208 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1209
1210 /* Make sure hardware complete it */
1211 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1212
1213 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1214 }
1215
1216 /*
1217 * Enable Queued Invalidation interface. This is a must to support
1218 * interrupt-remapping. Also used by DMA-remapping, which replaces
1219 * register based IOTLB invalidation.
1220 */
1221 int dmar_enable_qi(struct intel_iommu *iommu)
1222 {
1223 struct q_inval *qi;
1224 struct page *desc_page;
1225
1226 if (!ecap_qis(iommu->ecap))
1227 return -ENOENT;
1228
1229 /*
1230 * queued invalidation is already setup and enabled.
1231 */
1232 if (iommu->qi)
1233 return 0;
1234
1235 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1236 if (!iommu->qi)
1237 return -ENOMEM;
1238
1239 qi = iommu->qi;
1240
1241
1242 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1243 if (!desc_page) {
1244 kfree(qi);
1245 iommu->qi = NULL;
1246 return -ENOMEM;
1247 }
1248
1249 qi->desc = page_address(desc_page);
1250
1251 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1252 if (!qi->desc_status) {
1253 free_page((unsigned long) qi->desc);
1254 kfree(qi);
1255 iommu->qi = NULL;
1256 return -ENOMEM;
1257 }
1258
1259 qi->free_head = qi->free_tail = 0;
1260 qi->free_cnt = QI_LENGTH;
1261
1262 raw_spin_lock_init(&qi->q_lock);
1263
1264 __dmar_enable_qi(iommu);
1265
1266 return 0;
1267 }
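
/*
 * Illustrative sketch only (not in the original file): a typical bring-up
 * loop over all IOMMUs, modelled on enable_drhd_fault_handling() below.
 * The function name is hypothetical; guarded by #if 0.
 */
#if 0
static int example_enable_qi_on_all_iommus(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		int ret = dmar_enable_qi(iommu);

		/* -ENOENT: no QI capability; -ENOMEM: allocation failure */
		if (ret)
			return ret;
	}

	return 0;
}
#endif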
1268
1269 /* iommu interrupt handling. Most of it is MSI-like. */
1270
1271 enum faulttype {
1272 DMA_REMAP,
1273 INTR_REMAP,
1274 UNKNOWN,
1275 };
1276
1277 static const char *dma_remap_fault_reasons[] =
1278 {
1279 "Software",
1280 "Present bit in root entry is clear",
1281 "Present bit in context entry is clear",
1282 "Invalid context entry",
1283 "Access beyond MGAW",
1284 "PTE Write access is not set",
1285 "PTE Read access is not set",
1286 "Next page table ptr is invalid",
1287 "Root table address invalid",
1288 "Context table ptr is invalid",
1289 "non-zero reserved fields in RTP",
1290 "non-zero reserved fields in CTP",
1291 "non-zero reserved fields in PTE",
1292 "PCE for translation request specifies blocking",
1293 };
1294
1295 static const char *irq_remap_fault_reasons[] =
1296 {
1297 "Detected reserved fields in the decoded interrupt-remapped request",
1298 "Interrupt index exceeded the interrupt-remapping table size",
1299 "Present field in the IRTE entry is clear",
1300 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1301 "Detected reserved fields in the IRTE entry",
1302 "Blocked a compatibility format interrupt request",
1303 "Blocked an interrupt request due to source-id verification failure",
1304 };
1305
1306 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1307 {
1308 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1309 ARRAY_SIZE(irq_remap_fault_reasons))) {
1310 *fault_type = INTR_REMAP;
1311 return irq_remap_fault_reasons[fault_reason - 0x20];
1312 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1313 *fault_type = DMA_REMAP;
1314 return dma_remap_fault_reasons[fault_reason];
1315 } else {
1316 *fault_type = UNKNOWN;
1317 return "Unknown";
1318 }
1319 }
1320
1321 void dmar_msi_unmask(struct irq_data *data)
1322 {
1323 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1324 unsigned long flag;
1325
1326 /* unmask it */
1327 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1328 writel(0, iommu->reg + DMAR_FECTL_REG);
1329 /* Read a reg to force flush the post write */
1330 readl(iommu->reg + DMAR_FECTL_REG);
1331 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1332 }
1333
1334 void dmar_msi_mask(struct irq_data *data)
1335 {
1336 unsigned long flag;
1337 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1338
1339 /* mask it */
1340 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1341 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1342 /* Read a reg to force flush the post write */
1343 readl(iommu->reg + DMAR_FECTL_REG);
1344 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1345 }
1346
1347 void dmar_msi_write(int irq, struct msi_msg *msg)
1348 {
1349 struct intel_iommu *iommu = irq_get_handler_data(irq);
1350 unsigned long flag;
1351
1352 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1353 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1354 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1355 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1356 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1357 }
1358
1359 void dmar_msi_read(int irq, struct msi_msg *msg)
1360 {
1361 struct intel_iommu *iommu = irq_get_handler_data(irq);
1362 unsigned long flag;
1363
1364 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1365 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1366 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1367 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1368 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1369 }
1370
1371 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1372 u8 fault_reason, u16 source_id, unsigned long long addr)
1373 {
1374 const char *reason;
1375 int fault_type;
1376
1377 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1378
1379 if (fault_type == INTR_REMAP)
1380 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
1381 "fault index %llx\n"
1382 "INTR-REMAP:[fault reason %02d] %s\n",
1383 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1384 PCI_FUNC(source_id & 0xFF), addr >> 48,
1385 fault_reason, reason);
1386 else
1387 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1388 "fault addr %llx\n"
1389 "DMAR:[fault reason %02d] %s\n",
1390 (type ? "DMA Read" : "DMA Write"),
1391 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1392 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1393 return 0;
1394 }
1395
1396 #define PRIMARY_FAULT_REG_LEN (16)
1397 irqreturn_t dmar_fault(int irq, void *dev_id)
1398 {
1399 struct intel_iommu *iommu = dev_id;
1400 int reg, fault_index;
1401 u32 fault_status;
1402 unsigned long flag;
1403
1404 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1405 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1406 if (fault_status)
1407 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1408
1409 /* TBD: ignore advanced fault log currently */
1410 if (!(fault_status & DMA_FSTS_PPF))
1411 goto unlock_exit;
1412
1413 fault_index = dma_fsts_fault_record_index(fault_status);
1414 reg = cap_fault_reg_offset(iommu->cap);
1415 while (1) {
1416 u8 fault_reason;
1417 u16 source_id;
1418 u64 guest_addr;
1419 int type;
1420 u32 data;
1421
1422 /* highest 32 bits */
1423 data = readl(iommu->reg + reg +
1424 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1425 if (!(data & DMA_FRCD_F))
1426 break;
1427
1428 fault_reason = dma_frcd_fault_reason(data);
1429 type = dma_frcd_type(data);
1430
1431 data = readl(iommu->reg + reg +
1432 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1433 source_id = dma_frcd_source_id(data);
1434
1435 guest_addr = dmar_readq(iommu->reg + reg +
1436 fault_index * PRIMARY_FAULT_REG_LEN);
1437 guest_addr = dma_frcd_page_addr(guest_addr);
1438 /* clear the fault */
1439 writel(DMA_FRCD_F, iommu->reg + reg +
1440 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1441
1442 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1443
1444 dmar_fault_do_one(iommu, type, fault_reason,
1445 source_id, guest_addr);
1446
1447 fault_index++;
1448 if (fault_index >= cap_num_fault_regs(iommu->cap))
1449 fault_index = 0;
1450 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1451 }
1452
1453 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1454
1455 unlock_exit:
1456 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1457 return IRQ_HANDLED;
1458 }
1459
1460 int dmar_set_interrupt(struct intel_iommu *iommu)
1461 {
1462 int irq, ret;
1463
1464 /*
1465 * Check if the fault interrupt is already initialized.
1466 */
1467 if (iommu->irq)
1468 return 0;
1469
1470 irq = create_irq();
1471 if (!irq) {
1472 pr_err("IOMMU: no free vectors\n");
1473 return -EINVAL;
1474 }
1475
1476 irq_set_handler_data(irq, iommu);
1477 iommu->irq = irq;
1478
1479 ret = arch_setup_dmar_msi(irq);
1480 if (ret) {
1481 irq_set_handler_data(irq, NULL);
1482 iommu->irq = 0;
1483 destroy_irq(irq);
1484 return ret;
1485 }
1486
1487 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1488 if (ret)
1489 pr_err("IOMMU: can't request irq\n");
1490 return ret;
1491 }
1492
1493 int __init enable_drhd_fault_handling(void)
1494 {
1495 struct dmar_drhd_unit *drhd;
1496 struct intel_iommu *iommu;
1497
1498 /*
1499 * Enable fault control interrupt.
1500 */
1501 for_each_iommu(iommu, drhd) {
1502 u32 fault_status;
1503 int ret = dmar_set_interrupt(iommu);
1504
1505 if (ret) {
1506 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1507 (unsigned long long)drhd->reg_base_addr, ret);
1508 return -1;
1509 }
1510
1511 /*
1512 * Clear any previous faults.
1513 */
1514 dmar_fault(iommu->irq, iommu);
1515 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1516 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1517 }
1518
1519 return 0;
1520 }
1521
1522 /*
1523 * Re-enable Queued Invalidation interface.
1524 */
1525 int dmar_reenable_qi(struct intel_iommu *iommu)
1526 {
1527 if (!ecap_qis(iommu->ecap))
1528 return -ENOENT;
1529
1530 if (!iommu->qi)
1531 return -ENOENT;
1532
1533 /*
1534 * First disable queued invalidation.
1535 */
1536 dmar_disable_qi(iommu);
1537 /*
1538 * Then enable queued invalidation again. Since there is no pending
1539 * invalidation requests now, it's safe to re-enable queued
1540 * invalidation.
1541 */
1542 __dmar_enable_qi(iommu);
1543
1544 return 0;
1545 }
1546
1547 /*
1548 * Check interrupt remapping support in DMAR table description.
1549 */
1550 int __init dmar_ir_support(void)
1551 {
1552 struct acpi_table_dmar *dmar;
1553 dmar = (struct acpi_table_dmar *)dmar_tbl;
1554 if (!dmar)
1555 return 0;
1556 return dmar->flags & 0x1;
1557 }
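
/*
 * Illustrative sketch only: how a caller such as the interrupt-remapping
 * setup code might consult dmar_ir_support(). The caller shown here is a
 * simplified assumption, guarded by #if 0.
 */
#if 0
static int example_check_ir(void)
{
	if (!dmar_ir_support())
		return -ENODEV;	/* DMAR table does not advertise INTR_REMAP */

	return 0;
}
#endif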
1558
1559 static int __init dmar_free_unused_resources(void)
1560 {
1561 struct dmar_drhd_unit *dmaru, *dmaru_n;
1562
1563 /* DMAR units are in use */
1564 if (irq_remapping_enabled || intel_iommu_enabled)
1565 return 0;
1566
1567 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1568 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1569
1570 down_write(&dmar_global_lock);
1571 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1572 list_del(&dmaru->list);
1573 dmar_free_drhd(dmaru);
1574 }
1575 up_write(&dmar_global_lock);
1576
1577 return 0;
1578 }
1579
1580 late_initcall(dmar_free_unused_resources);
1581 IOMMU_INIT_POST(detect_intel_iommu);