x64, x2apic/intr-remap: code re-structuring, to be used by both DMA and Interrupt...
[deliverable/linux.git] drivers/pci/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include "iova.h"
#include "intel-iommu.h"

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);
LIST_HEAD(dmar_rmrr_units);

static struct acpi_table_header * __initdata dmar_tbl;

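/*
 * dmar_register_drhd_unit - add a parsed DRHD unit to dmar_drhd_units.
 * INCLUDE_ALL units go to the tail so that more specific units are
 * matched first when the list is scanned.
 */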
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

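/*
 * dmar_parse_one_dev_scope - walk one ACPI device-scope entry and resolve
 * it to a struct pci_dev.  Each entry describes a path of (device, function)
 * pairs starting at scope->bus on the given PCI segment.  Returns 0 with
 * *dev set to NULL when the device is not present (some BIOSes list
 * non-existent devices), or -EINVAL when the scope type does not match
 * the device that was found.
 */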
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

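/*
 * dmar_parse_dev_scope - parse all device-scope entries between start and
 * end.  The first pass counts the supported entries (endpoints and bridges),
 * the second pass allocates *devices and resolves each entry to a pci_dev.
 */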
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

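/*
 * dmar_parse_dev - second-stage parsing for one DRHD unit: resolve its
 * device scope to pci_dev pointers, or note it as the single permitted
 * INCLUDE_ALL unit.  Units that end up with neither devices nor
 * INCLUDE_ALL are unlinked and freed.
 */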
static int __init
dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	static int include_all;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (!dmaru->include_all)
		ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	else {
		/* Only allow one INCLUDE_ALL */
		if (include_all) {
			printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
				"device scope is allowed\n");
			ret = -EINVAL;
		}
		include_all = 1;
	}

	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

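/*
 * dmar_parse_one_rmrr - parse one Reserved Memory Region Reporting (RMRR)
 * structure and register the reserved address range.
 */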
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

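/*
 * rmrr_parse_dev - resolve the device scope of one RMRR unit; units whose
 * scope resolves to no devices are unlinked and freed.
 */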
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

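/* Print one DRHD or RMRR entry of the DMAR table for boot-time diagnostics. */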
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
			"DRHD (flags: 0x%08x) base: 0x%016Lx\n",
			drhd->flags, drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;

		printk(KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			rmrr->base_address, rmrr->end_address);
		break;
	}
}

/**
 * parse_dmar_table - parses the DMA remapping reporting (DMAR) table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT_4K - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

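/*
 * dmar_pci_device_match - return 1 if dev, or any bridge above it, is in
 * the devices[] array of a device scope, 0 otherwise.
 */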
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

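/*
 * dmar_find_matched_drhd_unit - find the DRHD unit whose device scope
 * covers dev.  Because INCLUDE_ALL units are kept at the tail of the list,
 * a specific match wins and the INCLUDE_ALL unit acts as the fallback.
 * A DMA-remapping caller might use it roughly as follows (illustrative
 * sketch only, not a prescribed call sequence):
 *
 *	drhd = dmar_find_matched_drhd_unit(pdev);
 *	if (drhd)
 *		iommu = drhd->iommu;
 */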
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = NULL;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
						drhd->devices_cnt, dev))
			return drhd;
	}

	return NULL;
}

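/*
 * dmar_dev_scope_init - second-stage initialization: resolve the device
 * scope of every DRHD and RMRR unit discovered by parse_dmar_table().
 * It resolves scopes via pci_get_slot(), so it has to run after PCI
 * enumeration is available.
 */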
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	int ret = -ENODEV;

	for_each_drhd_unit(drhd) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	for_each_rmrr_units(rmrr) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	return ret;
}

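/*
 * dmar_table_init - parse the DMAR table (once) and verify that at least
 * one DRHD and one RMRR entry were found.  Safe to call more than once;
 * subsequent calls return 0 without re-parsing.
 */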
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units)) {
		printk(KERN_INFO PREFIX "No RMRR found\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * early_dmar_detect - checks to see if the platform supports DMAR devices
 */
int __init early_dmar_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

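/*
 * alloc_iommu - allocate and set up the struct intel_iommu for one DRHD
 * unit: ioremap the register window (growing it if the capability
 * registers say it is larger than one 4K page), cache the CAP/ECAP
 * registers and assign a sequence id.  Used by both DMA-remapping and
 * Interrupt-remapping setup.
 */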
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;

	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = PAGE_ALIGN_4K(map_size);
	if (map_size > PAGE_SIZE_4K) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		iommu->cap, iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}

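/*
 * free_iommu - undo alloc_iommu: release the DMA-remapping state (when
 * CONFIG_DMAR is enabled), unmap the register window and free the
 * structure.  A NULL iommu is allowed and simply ignored.
 */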
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}