/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
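
/*
 * Worked example for the scope walk above (illustrative only; the sizes
 * assume the ACPICA layouts of this era: a 6-byte acpi_dmar_device_scope
 * header followed by 2-byte {dev, fn} path entries).  A scope entry of
 * length 8 names a device directly on scope->bus; an entry of length 10
 * carries two path entries, so the walk first resolves a bridge on
 * scope->bus, descends to its subordinate bus, and then resolves the
 * endpoint behind that bridge.
 */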

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
			"DRHD (flags: 0x%08x) base: 0x%016Lx\n",
			drhd->flags, (unsigned long long)drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;

		printk(KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have been done
	 * with the early fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
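
/*
 * Note on ordering: dmar_register_drhd_unit() keeps INCLUDE_ALL units at
 * the tail of dmar_drhd_units, so the loop above matches a device against
 * the explicit device scopes first (walking up through dev->bus->self for
 * devices behind bridges) and only then falls back to the catch-all unit
 * of the device's segment.  For example, an endpoint listed by its parent
 * root port in a DRHD scope matches that DRHD rather than the INCLUDE_ALL
 * unit.
 */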

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}
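
/*
 * Expected bring-up order, as a sketch (the real call sites live in arch
 * setup code and in the DMA/interrupt-remapping init paths, not in this
 * file):
 *
 *	detect_intel_iommu();		earliest, from the early ACPI table scan
 *	dmar_table_init();		re-maps the DMAR and parses DRHD/RMRR entries
 *	dmar_dev_scope_init();		once PCI is up, binds scopes to pci_dev's
 */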

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		 (unsigned long long)drhd->reg_base_addr,
		 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		 (unsigned long long)iommu->cap,
		 (unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc = 0;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is to
		 * avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			goto out;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}
out:
	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
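
/*
 * Illustrative use of the queued invalidation interface (a sketch; the
 * real callers live in the DMA- and interrupt-remapping code, not in this
 * file): once dmar_enable_qi() has returned 0, invalidations go through
 * the queue instead of the register interface, e.g.
 *
 *	if (!dmar_enable_qi(iommu)) {
 *		qi_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 *		qi_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 *	}
 */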

/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
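
/*
 * Layout of one primary fault record as consumed by dmar_fault() above
 * (PRIMARY_FAULT_REG_LEN = 16 bytes per record, starting at
 * cap_fault_reg_offset()): the low qword holds the faulting page address,
 * the dword at offset 8 carries the source-id, and the dword at offset 12
 * packs the F bit (31), the request type (30) and the fault reason (7:0),
 * matching the dma_frcd_* accessors used in the loop.
 */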

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}