/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
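
/*
 * Layout recap (informational): a DMAR table is an acpi_table_dmar
 * header followed by a sequence of variable-length remapping
 * structures. Each structure starts with an acpi_dmar_header carrying
 * a type (e.g. ACPI_DMAR_TYPE_HARDWARE_UNIT for a DRHD,
 * ACPI_DMAR_TYPE_RESERVED_MEMORY for an RMRR) and a length, which
 * parse_dmar_table() below uses to walk from one structure to the
 * next.
 */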

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>

#undef PREFIX
#define PREFIX "DMAR:"

/*
 * No locks are needed, as the DMA remapping hardware unit list is
 * constructed at boot time and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL units at the tail, so that a scan of the
         * list will find them at the very end.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list non-existent devices in the DMAR
                 * table; just ignore them.
                 */
                if (!bus) {
                        printk(KERN_WARNING
                                PREFIX "Device scope bus [%d] not found\n",
                                scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
                        printk(KERN_WARNING PREFIX
                                "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                                segment, bus->number, path->dev, path->fn);
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                printk(KERN_WARNING PREFIX
                        "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                        segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) ||
            (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
             !pdev->subordinate)) {
                printk(KERN_WARNING PREFIX
                        "Device scope type does not match for %s\n",
                        pci_name(pdev));
                /* report the name before dropping our reference */
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}
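
/*
 * Worked example for the path walk above (device/function numbers are
 * made up for illustration): a scope entry with scope->bus == 0 and a
 * two-step path {dev 0x1e, fn 0}, {dev 3, fn 0} is resolved by finding
 * bridge 0000:00:1e.0 on bus 0, descending to its subordinate bus, and
 * then looking up device 3, function 0 there.
 */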

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                       struct pci_dev ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        void *tmp = start;
        int index;
        int ret;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else
                        printk(KERN_WARNING PREFIX
                                "Unsupported device scope\n");
                start += scope->length;
        }
        if (*cnt == 0)
                return 0;

        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
        if (!*devices)
                return -ENOMEM;

        start = tmp;
        index = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                &(*devices)[index], segment);
                        if (ret) {
                                kfree(*devices);
                                return ret;
                        }
                        index++;
                }
                start += scope->length;
        }

        return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}
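
/*
 * Note that dmar_parse_one_drhd() above only records the register base
 * and the INCLUDE_ALL flag at DMAR-table parse time; the device scope
 * is resolved later by dmar_parse_dev() below, called from
 * dmar_dev_scope_init(), by which point the pci_find_bus()/
 * pci_get_slot() lookups it relies on are expected to succeed.
 */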

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

        if (dmaru->include_all)
                return 0;

        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                   ((void *)drhd) + drhd->header.length,
                                   &dmaru->devices_cnt, &dmaru->devices,
                                   drhd->segment);
        if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
        return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
        list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;

        dmar_register_rmrr_unit(rmrru);
        return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
        struct acpi_dmar_reserved_memory *rmrr;
        int ret;

        rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
                                   ((void *)rmrr) + rmrr->header.length,
                                   &rmrru->devices_cnt, &rmrru->devices,
                                   rmrr->segment);

        if (ret || (rmrru->devices_cnt == 0)) {
                list_del(&rmrru->list);
                kfree(rmrru);
        }
        return ret;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = (struct acpi_dmar_hardware_unit *)header;
                printk(KERN_INFO PREFIX
                        "DRHD (flags: 0x%08x) base: 0x%016Lx\n",
                        drhd->flags, (unsigned long long)drhd->address);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = (struct acpi_dmar_reserved_memory *)header;

                printk(KERN_INFO PREFIX
                        "RMRR base: 0x%016Lx end: 0x%016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        }
}
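
/*
 * For reference, the lines printed above look roughly like this (the
 * addresses are examples only):
 *
 *      DMAR:DRHD (flags: 0x00000001) base: 0x00000000fed90000
 *      DMAR:RMRR base: 0x00000000cf4bc000 end: 0x00000000cf4cbfff
 */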

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we can find the DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;

        /*
         * Do it again here: the earlier dmar_tbl mapping may have been
         * done with a fixed map.
         */
        dmar_table_detect();

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                return -EINVAL;
        }

        printk(KERN_INFO PREFIX "Host address width %d\n",
                dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
                        ret = dmar_parse_one_rmrr(entry_header);
#endif
                        break;
                default:
                        printk(KERN_WARNING PREFIX
                                "Unknown DMAR structure type\n");
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
                          struct pci_dev *dev)
{
        int index;

        while (dev) {
                for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}
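
/*
 * Example (hypothetical topology): if devices[] contains the bridge
 * 0000:00:1e.0, then a device 0000:05:03.0 sitting behind that bridge
 * also matches, because the loop above walks up through dev->bus->self
 * until it reaches a listed ancestor or the root.
 */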

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        return dmaru;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        return dmaru;
        }

        return NULL;
}

int __init dmar_dev_scope_init(void)
{
        struct dmar_drhd_unit *drhd, *drhd_n;
        int ret = -ENODEV;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_DMAR
        {
                struct dmar_rmrr_unit *rmrr, *rmrr_n;
                list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                        ret = rmrr_parse_dev(rmrr);
                        if (ret)
                                return ret;
                }
        }
#endif

        return ret;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                printk(KERN_INFO PREFIX "No DMAR devices found\n");
                return -ENODEV;
        }

#ifdef CONFIG_DMAR
        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
        parse_ioapics_under_ir();
#endif
        return 0;
}

void __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();

        {
#ifdef CONFIG_INTR_REMAP
                struct acpi_table_dmar *dmar;
                /*
                 * For now we disable DMA-remapping when interrupt
                 * remapping is enabled. Once support for queued
                 * invalidation of IOTLB entries is added, this will
                 * no longer be needed.
                 */
                dmar = (struct acpi_table_dmar *) dmar_tbl;
                if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                        printk(KERN_INFO
                               "Queued invalidation will be enabled to "
                               "support x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
                if (ret && !no_iommu && !iommu_detected && !swiotlb &&
                    !dmar_disabled)
                        iommu_detected = 1;
#endif
        }
        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw = 0;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;

        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
                goto error;
        }
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto error;
        }
#endif
        iommu->agaw = agaw;

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > VTD_PAGE_SIZE) {
                iounmap(iommu->reg);
                iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                if (!iommu->reg) {
                        printk(KERN_ERR "IOMMU: can't map the region\n");
                        goto error;
                }
        }

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                 (unsigned long long)drhd->reg_base_addr,
                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                 (unsigned long long)iommu->cap,
                 (unsigned long long)iommu->ecap);

        spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;
error:
        /* don't leak the register mapping if we fail after ioremap() */
        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
        return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

#ifdef CONFIG_DMAR
        free_dmar_iommu(iommu);
#endif

        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}
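
/*
 * Ring bookkeeping example (indices illustrative): with free_tail == 5
 * and slots 5 and 6 both QI_DONE, the loop above marks both QI_FREE,
 * advances free_tail to 7 and bumps free_cnt by 2, stopping at the
 * first slot that has not completed yet.
 */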

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> 4) == index) {
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                               sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                            sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc = 0;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

        spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register, indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We leave the interrupts disabled, to prevent interrupt
                 * context from queueing another cmd while a cmd is already
                 * submitted and waiting for completion on this cpu. This is
                 * to avoid a deadlock where the interrupt context could wait
                 * indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        goto out;

                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }
out:
        qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);

        return rc;
}
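
/*
 * Each submission thus consumes two slots: the caller's descriptor at
 * 'index' plus a wait descriptor at 'wait_index' whose status
 * write-back is what the polling loop above watches. qi_global_iec()
 * below is the simplest caller: it builds one descriptor on the stack
 * and hands it to qi_submit_sync().
 */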

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                     u64 type, int non_present_entry_flush)
{
        struct qi_desc desc;

        if (non_present_entry_flush) {
                if (!cap_caching_mode(iommu->cap))
                        return 1;
                else
                        did = 0;
        }

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                   unsigned int size_order, u64 type,
                   int non_present_entry_flush)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (non_present_entry_flush) {
                if (!cap_caching_mode(iommu->cap))
                        return 1;
                else
                        did = 0;
        }

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        return qi_submit_sync(&desc, iommu);
}

/*
 * Enable the Queued Invalidation interface. This is a must for
 * interrupt-remapping support. It is also used by DMA-remapping,
 * where it replaces register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 cmd, sts;
        unsigned long flags;
        struct q_inval *qi;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already set up and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
        if (!qi->desc) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        /* zeroed, so every slot starts out QI_FREE rather than stale */
        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_init(&qi->q_lock);

        spin_lock_irqsave(&iommu->register_lock, flags);
        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        cmd = iommu->gcmd | DMA_GCMD_QIE;
        iommu->gcmd |= DMA_GCMD_QIE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        return 0;
}
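
/*
 * Sketch of the expected call order (an assumption drawn from the code
 * in this file, not a documented contract): a caller bringing up
 * queued invalidation would do something like
 *
 *      if (!dmar_enable_qi(iommu))
 *              qi_flush_context(iommu, 0, 0, 0,
 *                               DMA_CCMD_GLOBAL_INVL, 0);
 *
 * i.e. enable the interface (dmar_enable_qi() itself checks the QIS
 * capability bit), then issue invalidations through qi_flush_context()
 * and qi_flush_iotlb().
 */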